/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2018
 *
 * Non-moving garbage collector and allocator
 *
 * ---------------------------------------------------------------------------*/

#include "Rts.h"
#include "RtsUtils.h"
#include "Capability.h"
#include "Printer.h"
#include "Storage.h"
// We call evacuate, which expects the thread-local gc_thread to be valid;
// this is sometimes declared as a register variable, so it is necessary to
// include the declaration so that the compiler doesn't clobber the register.
#include "GCThread.h"
#include "GCTDecl.h"
#include "Schedule.h"

#include "NonMoving.h"
#include "NonMovingMark.h"
#include "NonMovingSweep.h"
#include "NonMovingCensus.h"
#include "StablePtr.h" // markStablePtrTable
#include "Schedule.h" // markScheduler
#include "Weak.h" // dead_weak_ptr_list

struct NonmovingHeap nonmovingHeap;

uint8_t nonmovingMarkEpoch = 1;

static void nonmovingBumpEpoch(void) {
    nonmovingMarkEpoch = nonmovingMarkEpoch == 1 ? 2 : 1;
}

#if defined(THREADED_RTS)
/*
 * This mutex ensures that only one non-moving collection is active at a time.
 */
Mutex nonmoving_collection_mutex;

OSThreadId mark_thread;
bool concurrent_coll_running = false;
Condition concurrent_coll_finished;
Mutex concurrent_coll_finished_lock;
#endif

/*
 * Note [Non-moving garbage collector]
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * The sources rts/NonMoving*.c implement GHC's non-moving garbage collector
 * for the oldest generation. In contrast to the throughput-oriented moving
 * collector, the non-moving collector is designed to achieve low GC latencies
 * on large heaps. It accomplishes this by way of a concurrent
 * mark-and-sweep collection strategy on a specially-designed heap structure.
 * While the design is described in detail in the design document found in
 * docs/storage/nonmoving-gc, we briefly summarize the structure here.
 *
 *
 * === Heap Structure ===
 *
 * The nonmoving heap (embodied by struct NonmovingHeap) consists of a family
 * of allocators, each serving a range of allocation sizes. Each allocator
 * consists of a set of *segments*, each of which contains fixed-size *blocks*
 * (not to be confused with "blocks" provided by GHC's block allocator; this is
 * admittedly an unfortunate overlap in terminology).  These blocks are the
 * backing store for the allocator. In addition to blocks, the segment also
 * contains some header information (see struct NonmovingSegment in
 * NonMoving.h). This header contains a *bitmap* encoding one byte per block
 * (used by the collector to record liveness), as well as the index of the next
 * unallocated block (and a *snapshot* of this field which will be described in
 * the next section).
 *
 * Each allocator maintains three sets of segments:
 *
 *  - A *current* segment for each capability; this is the segment which that
 *    capability will allocate into.
 *
 *  - A pool of *active* segments, each of which contains at least one
 *    unallocated block. The allocator will take a segment from this pool when
 *    it fills its *current* segment.
 *
 *  - A set of *filled* segments, which contain no unallocated blocks and will
 *    be collected during the next major GC cycle.
 *
 * Storage for segments is allocated from the block allocator as an aligned
 * group of NONMOVING_SEGMENT_BLOCKS blocks. This makes the task of locating
 * the segment header for a closure a simple matter of bit-masking (as
 * implemented by nonmovingGetSegment).
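 *
 * Since a segment's storage is aligned to its (power-of-two) size, this
 * lookup is roughly (a sketch; the real implementation lives in NonMoving.h):
 *
 *     struct NonmovingSegment *seg =
 *         (struct NonmovingSegment *) ((uintptr_t) p & ~(NONMOVING_SEGMENT_SIZE - 1));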
 *
 * In addition, to relieve pressure on the block allocator we keep a small pool
 * of free blocks around (nonmovingHeap.free) which can be pushed/popped
 * to/from in a lock-free manner.
 *
 *
 * === Allocation ===
 *
 * The allocator (as implemented by nonmovingAllocate) starts by identifying
 * which allocator the request should be made against. It then allocates into
 * its local current segment and bumps the next_free pointer to point to the
 * next unallocated block (as indicated by the bitmap). If it finds that the
 * current segment is now full, it moves it to the filled list and looks for a
 * new segment to make current, drawing from a few sources (sketched below):
 *
 *  1. the allocator's active list (see pop_active_segment)
 *  2. the nonmoving heap's free block pool (see nonmovingPopFreeSegment)
 *  3. allocate a new segment from the block allocator (see
 *     nonmovingAllocSegment)
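 *
 * In pseudo-code, the refill looks roughly like this (a condensed sketch of
 * the logic in nonmovingAllocate below):
 *
 *     struct NonmovingSegment *new_current = pop_active_segment(alloca);   // 1
 *     if (new_current == NULL) {
 *         // nonmovingAllocSegment consults the free segment pool (2) before
 *         // falling back to the block allocator (3)
 *         new_current = nonmovingAllocSegment(cap->node);
 *         nonmovingInitSegment(new_current, log_block_size);
 *     }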
 *
 * Note that allocation does *not* involve modifying the bitmap. The bitmap is
 * only modified by the collector.
 *
 *
 * === Snapshot invariant ===
 *
 * To safely collect in a concurrent setting, the collector relies on the
 * notion of a *snapshot*. The snapshot is a hypothetical frozen state of the
 * heap topology taken at the beginning of the major collection cycle.
 * With this definition we require the following property of the mark phase,
 * which we call the *snapshot invariant*,
 *
 *     All objects that were reachable at the time the snapshot was taken
 *     must have their mark bits set at the end of the mark phase.
 *
 * As the mutator might change the topology of the heap while we are marking,
 * this property requires some cooperation from the mutator to maintain.
 * Specifically, we rely on a write barrier as described in Note [Update
 * remembered set].
 *
 * To determine which objects existed when the snapshot was taken, we
 * record a snapshot of each segment's next_free pointer at the beginning of
 * collection.
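 *
 * Concretely, a block at index i of segment seg belongs to the snapshot iff
 *
 *     i < nonmovingSegmentInfo(seg)->next_free_snap
 *
 * which is precisely the test used by the mark phase (see step 3 under
 * "Marking" below).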
 *
 *
 * === Collection ===
 *
 * Collection happens in a few phases, some of which occur during a
 * stop-the-world period (marked with [STW]) and others of which can occur
 * concurrently with mutation and minor collection (marked with [CONC]):
 *
 *  1. [STW] Preparatory GC: Here we do a standard minor collection of the
 *     younger generations (which may evacuate things to the nonmoving heap).
 *     References from younger generations into the nonmoving heap are recorded
 *     in the mark queue (see Note [Aging under the non-moving collector] in
 *     this file).
 *
 *  2. [STW] Snapshot update: Here we update the segment snapshot metadata
 *     (see nonmovingPrepareMark) and move the filled segments to
 *     nonmovingHeap.sweep_list, which is the set of segments which we will
 *     sweep this GC cycle.
 *
 *  3. [STW] Root collection: Here we walk over a variety of root sources
 *     and add them to the mark queue (see nonmovingCollect).
 *
 *  4. [CONC] Concurrent marking: Here we do the majority of marking concurrently
 *     with mutator execution (but with the write barrier enabled; see
 *     Note [Update remembered set]).
 *
 *  5. [STW] Final sync: Here we interrupt the mutators, ask them to
 *     flush their final update remembered sets, and mark any new references
 *     we find.
 *
 *  6. [CONC] Sweep: Here we walk over the nonmoving segments on sweep_list
 *     and place them back on either the active, current, or filled list,
 *     depending upon how much live data they contain.
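 *
 * In terms of the code in this file, these phases map roughly onto the
 * following call structure (a sketch; the THREADED_RTS plumbing is elided):
 *
 *     GarbageCollect()                // phase 1, the preparatory GC (GC.c)
 *       nonmovingCollect()
 *         nonmovingPrepareMark()      // phase 2
 *         ... mark roots ...          // phase 3
 *         nonmovingConcurrentMark()   // spawned as the concurrent mark thread
 *           nonmovingMark_()          // phases 4-6: mark, final sync, sweep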
 *
 *
 * === Marking ===
 *
 * Ignoring large and static objects, marking a closure is fairly
 * straightforward (implemented in NonMovingMark.c:mark_closure):
 *
 *  1. Check whether the closure is in the non-moving generation; if not then
 *     we ignore it.
 *  2. Find the segment containing the closure's block.
 *  3. Check whether the closure's block is above seg->next_free_snap; if so
 *     then the block was not allocated when we took the snapshot and therefore
 *     we don't need to mark it.
 *  4. Check whether the block's bitmap entry is equal to nonmovingMarkEpoch. If
 *     so then we can stop as we have already marked it.
 *  5. Push the closure's pointers to the mark queue.
 *  6. Set the block's bitmap entry to nonmovingMarkEpoch.
 *
 * Note that the ordering of (5) and (6) is rather important, as described in
 * Note [StgStack dirtiness flags and concurrent marking].
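 *
 * In rough pseudo-C (a sketch; helper names approximate those in NonMoving.h,
 * and the full logic, including large and static objects, lives in
 * NonMovingMark.c:mark_closure):
 *
 *     bdescr *bd = Bdescr((StgPtr) p);
 *     if (!(bd->flags & BF_NONMOVING)) return;                         // (1)
 *     struct NonmovingSegment *seg = nonmovingGetSegment((StgPtr) p);  // (2)
 *     nonmoving_block_idx i = nonmovingGetBlockIdx((StgPtr) p);
 *     if (i >= nonmovingSegmentInfo(seg)->next_free_snap) return;      // (3)
 *     if (seg->bitmap[i] == nonmovingMarkEpoch) return;                // (4)
 *     ... push p's pointer fields onto the mark queue ...              // (5)
 *     seg->bitmap[i] = nonmovingMarkEpoch;                             // (6)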
 *
 *
 * === Other references ===
 *
 * Apart from the design document in docs/storage/nonmoving-gc and the Ueno
 * 2016 paper (TODO citation) from which it drew inspiration, there are a
 * variety of other relevant Notes scattered throughout the tree:
 *
 *  - Note [Concurrent non-moving collection] (NonMoving.c) describes
 *    concurrency control of the nonmoving collector
 *
 *  - Note [Live data accounting in nonmoving collector] (NonMoving.c)
 *    describes how we track the quantity of live data in the nonmoving
 *    generation.
 *
 *  - Note [Aging under the non-moving collector] (NonMoving.c) describes how
 *    we accommodate aging.
 *
 *  - Note [Large objects in the non-moving collector] (NonMovingMark.c)
 *    describes how we track large objects.
 *
 *  - Note [Update remembered set] (NonMovingMark.c) describes the function and
 *    implementation of the update remembered set used to realize the concurrent
 *    write barrier.
 *
 *  - Note [Concurrent read barrier on deRefWeak#] (NonMovingMark.c) describes
 *    the read barrier on Weak# objects.
 *
 *  - Note [Unintentional marking in resurrectThreads] (NonMovingMark.c) describes
 *    a tricky interaction between the update remembered set flush and weak
 *    finalization.
 *
 *  - Note [Origin references in the nonmoving collector] (NonMovingMark.h)
 *    describes how we implement indirection short-cutting and the selector
 *    optimisation.
 *
 *  - Note [StgStack dirtiness flags and concurrent marking] (TSO.h) describes
 *    the protocol for concurrent marking of stacks.
 *
 *  - Note [Static objects under the nonmoving collector] (Storage.c) describes
 *    treatment of static objects.
 *
 *
 * Note [Concurrent non-moving collection]
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * Concurrency control of non-moving garbage collection is a bit tricky. There
 * are a few things to keep in mind:
 *
 *  - Only one non-moving collection may be active at a time. This is enforced by the
 *    concurrent_coll_running flag, which is set when a collection is on-going. If
 *    we attempt to initiate a new collection while this is set we wait on the
 *    concurrent_coll_finished condition variable, which signals when the
 *    active collection finishes.
 *
 *  - In between the mark and sweep phases the non-moving collector must synchronize
 *    with mutator threads to collect and mark their final update remembered
 *    sets. This is accomplished using
 *    stopAllCapabilitiesWith(SYNC_FLUSH_UPD_REM_SET). Capabilities are held
 *    until the final mark has concluded.
 *
 * Note that the possibility of concurrent minor and non-moving collections
 * requires that we handle static objects a bit specially. See
 * Note [Static objects under the nonmoving collector] in Storage.c
 * for details.
 *
 *
 * Note [Aging under the non-moving collector]
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The initial design of the non-moving collector mandated that all live data
 * be evacuated to the non-moving heap prior to a major collection. This
 * simplified certain bits of implementation and eased reasoning. However, it
 * was (unsurprisingly) also found to result in significant amounts of
 * unnecessary copying.
 *
 * Consequently, we now allow aging. Aging allows the preparatory GC leading up
 * to a major collection to evacuate some objects into the young generation.
 * However, this introduces the following tricky case that might arise after
 * we have finished the preparatory GC:
 *
 *       moving heap  ┆  non-moving heap
 *     ───────────────┆──────────────────
 *                    ┆
 *      B ←────────────── A ←─────────────── root
 *      │             ┆     ↖─────────────── gen1 mut_list
 *      ╰───────────────→ C
 *                    ┆
 *
 * In this case C is clearly live, but the non-moving collector can only see
 * this by walking through B, which lives in the moving heap. However, doing so
 * would require that we synchronize with the mutator/minor GC to ensure that it
 * isn't in the middle of moving B. What to do?
 *
 * The solution we use here is to teach the preparatory moving collector to
 * "evacuate" objects it encounters in the non-moving heap by adding them to
 * the mark queue. This is done by pushing the object to the update
 * remembered set of the capability held by the evacuating gc_thread
 * (implemented by markQueuePushClosureGC).
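 *
 * Schematically, the evacuation path does something like the following when
 * it encounters an object already living in the non-moving heap (a sketch,
 * not the exact Evac.c code; `queue` stands for the update remembered set of
 * the evacuating gc_thread's capability):
 *
 *     if (bd->flags & BF_NONMOVING) {
 *         // don't copy; just ensure the concurrent mark will visit it
 *         markQueuePushClosureGC(queue, p);
 *         return;
 *     }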
 *
 * Consequently collection of the case above would proceed as follows:
 *
 *  1. Initial state:
 *      * A lives in the non-moving heap and is reachable from the root set
 *      * A is on the oldest generation's mut_list, since it contains a pointer
 *        to B, which lives in a younger generation
 *      * B lives in the moving collector's from space
 *      * C lives in the non-moving heap
 *
 *  2. Preparatory GC: Scavenging mut_lists:
 *
 *     The mut_list of the oldest generation is scavenged, resulting in B being
 *     evacuated (aged) into the moving collector's to-space.
 *
 *  3. Preparatory GC: Scavenge B
 *
 *     B (now in to-space) is scavenged, resulting in evacuation of C.
 *     evacuate(C) pushes a reference to C to the mark queue.
 *
 *  4. Non-moving GC: C is marked
 *
 *     The non-moving collector will come to C in the mark queue and mark it.
 *
 *
 * Note [Deadlock detection under the non-moving collector]
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * In GHC the garbage collector is responsible for identifying deadlocked
 * programs. Providing for this responsibility is slightly tricky in the
 * non-moving collector due to the existence of aging. In particular, the
 * non-moving collector cannot traverse objects living in a young generation
 * but reachable from the non-moving generation, as described in Note [Aging
 * under the non-moving collector].
 *
 * However, this can pose trouble for deadlock detection since it means that we
 * may conservatively mark dead closures as live. Consider this case:
 *
 *       moving heap  ┆  non-moving heap
 *     ───────────────┆──────────────────
 *                    ┆
 *      MVAR_QUEUE ←───── TSO ←───────────── gen1 mut_list
 *        ↑  │  ╰────────↗  │
 *        │  │        ┆     │
 *        │  │        ┆     ↓
 *        │  ╰──────────→ MVAR
 *        ╰─────────────────╯
 *                    ┆
 *
 * In this case we have a TSO blocked on a dead MVar. Because the MVAR_TSO_QUEUE on
 * which it is blocked lives in the moving heap, the TSO is necessarily on the
 * oldest generation's mut_list. As in Note [Aging under the non-moving
 * collector], the MVAR_TSO_QUEUE will be evacuated. If MVAR_TSO_QUEUE is aged
 * (e.g. evacuated to the young generation) then the MVAR will be added to the
 * mark queue. Consequently, we will falsely conclude that the MVAR is still
 * alive and fail to spot the deadlock.
 *
 * To avoid this sort of situation we disable aging when we are starting a
 * major GC specifically for deadlock detection (as done by
 * scheduleDetectDeadlock). This condition is recorded by the
 * deadlock_detect_gc global variable declared in GC.h. Setting this has a few
 * effects on the preparatory GC:
 *
 *  - Evac.c:alloc_for_copy forces evacuation to the non-moving generation.
 *
 *  - The evacuation logic usually responsible for pushing objects living in
 *    the non-moving heap to the mark queue is disabled. This is safe because
 *    we know that all live objects will be in the non-moving heap by the end
 *    of the preparatory moving collection.
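 *
 *    Relative to the sketch in Note [Aging under the non-moving collector],
 *    this amounts to guarding the push on deadlock_detect_gc (again a sketch,
 *    not the exact Evac.c code):
 *
 *        if (bd->flags & BF_NONMOVING) {
 *            if (!deadlock_detect_gc)
 *                markQueuePushClosureGC(queue, p);
 *            return;
 *        }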
 *
 *
 * Note [Live data accounting in nonmoving collector]
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * The nonmoving collector uses an approximate heuristic for reporting live
 * data quantity. Specifically, during mark we record how much live data we
 * find in nonmoving_live_words. At the end of mark we declare this amount to
 * be how much live data we have on in the nonmoving heap (by setting
 * oldest_gen->live_estimate).
 *
 * In addition, we update oldest_gen->live_estimate every time we fill a
 * segment. This, as well, is quite approximate: we assume that all blocks
 * above next_free_snap are newly allocated. In principle we could refer to the
 * bitmap to count how many blocks we actually allocated but this too would be
 * approximate due to concurrent collection and ultimately seems more costly
 * than the problem demands.
 *
 */

memcount nonmoving_live_words = 0;

#if defined(THREADED_RTS)
static void* nonmovingConcurrentMark(void *mark_queue);
#endif
static void nonmovingMark_(MarkQueue *mark_queue, StgWeak **dead_weaks, StgTSO **resurrected_threads);

static void nonmovingInitSegment(struct NonmovingSegment *seg, uint8_t log_block_size)
{
    bdescr *bd = Bdescr((P_) seg);
    seg->link = NULL;
    seg->todo_link = NULL;
    seg->next_free = 0;
    nonmovingClearBitmap(seg);
    bd->nonmoving_segment.log_block_size = log_block_size;
    bd->nonmoving_segment.next_free_snap = 0;
    bd->u.scan = nonmovingSegmentGetBlock(seg, 0);
}

// Add a segment to the free list.
void nonmovingPushFreeSegment(struct NonmovingSegment *seg)
{
    // See Note [Live data accounting in nonmoving collector].
    if (nonmovingHeap.n_free > NONMOVING_MAX_FREE) {
        bdescr *bd = Bdescr((StgPtr) seg);
        ACQUIRE_SM_LOCK;
        ASSERT(oldest_gen->n_blocks >= bd->blocks);
        ASSERT(oldest_gen->n_words >= BLOCK_SIZE_W * bd->blocks);
        oldest_gen->n_blocks -= bd->blocks;
        oldest_gen->n_words  -= BLOCK_SIZE_W * bd->blocks;
        freeGroup(bd);
        RELEASE_SM_LOCK;
        return;
    }

    while (true) {
        struct NonmovingSegment *old = nonmovingHeap.free;
        seg->link = old;
        if (cas((StgVolatilePtr) &nonmovingHeap.free, (StgWord) old, (StgWord) seg) == (StgWord) old)
            break;
    }
    __sync_add_and_fetch(&nonmovingHeap.n_free, 1);
}

static struct NonmovingSegment *nonmovingPopFreeSegment(void)
{
    while (true) {
        struct NonmovingSegment *seg = nonmovingHeap.free;
        if (seg == NULL) {
            return NULL;
        }
        if (cas((StgVolatilePtr) &nonmovingHeap.free,
                (StgWord) seg,
                (StgWord) seg->link) == (StgWord) seg) {
            __sync_sub_and_fetch(&nonmovingHeap.n_free, 1);
            return seg;
        }
    }
}

unsigned int nonmovingBlockCountFromSize(uint8_t log_block_size)
{
  // We compute the overwhelmingly common size cases directly to avoid a very
  // expensive integer division.
  switch (log_block_size) {
    case 3:  return nonmovingBlockCount(3);
    case 4:  return nonmovingBlockCount(4);
    case 5:  return nonmovingBlockCount(5);
    case 6:  return nonmovingBlockCount(6);
    case 7:  return nonmovingBlockCount(7);
    default: return nonmovingBlockCount(log_block_size);
  }
}

/*
 * Request a fresh segment from the free segment list or allocate one on the
 * given node.
 *
 * Caller must hold SM_MUTEX (although we take the gc_alloc_block_sync spinlock
 * under the assumption that we are in a GC context).
 */
static struct NonmovingSegment *nonmovingAllocSegment(uint32_t node)
{
    // First try taking something off of the free list
    struct NonmovingSegment *ret;
    ret = nonmovingPopFreeSegment();

    // Nothing in the free list, allocate a new segment...
    if (ret == NULL) {
        // Take gc spinlock: another thread may be scavenging a moving
        // generation and call `todo_block_full`
        ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
        bdescr *bd = allocAlignedGroupOnNode(node, NONMOVING_SEGMENT_BLOCKS);
        // See Note [Live data accounting in nonmoving collector].
        oldest_gen->n_blocks += bd->blocks;
        oldest_gen->n_words  += BLOCK_SIZE_W * bd->blocks;
        RELEASE_SPIN_LOCK(&gc_alloc_block_sync);

        for (StgWord32 i = 0; i < bd->blocks; ++i) {
            initBdescr(&bd[i], oldest_gen, oldest_gen);
            bd[i].flags = BF_NONMOVING;
        }
        ret = (struct NonmovingSegment *)bd->start;
    }

    // Check alignment
    ASSERT(((uintptr_t)ret % NONMOVING_SEGMENT_SIZE) == 0);
    return ret;
}

static inline unsigned long log2_floor(unsigned long x)
{
    return sizeof(unsigned long)*8 - 1 - __builtin_clzl(x);
}

static inline unsigned long log2_ceil(unsigned long x)
{
    unsigned long log = log2_floor(x);
    return (x - (1 << log)) ? log + 1 : log;
}

// Advance a segment's next_free pointer. Returns true if the segment is full.
static bool advance_next_free(struct NonmovingSegment *seg, const unsigned int blk_count)
{
    const uint8_t *bitmap = seg->bitmap;
    ASSERT(blk_count == nonmovingSegmentBlockCount(seg));
#if defined(NAIVE_ADVANCE_FREE)
    // reference implementation
    for (unsigned int i = seg->next_free+1; i < blk_count; i++) {
        if (!bitmap[i]) {
            seg->next_free = i;
            return false;
        }
    }
    seg->next_free = blk_count;
    return true;
#else
    const uint8_t *c = memchr(&bitmap[seg->next_free+1], 0, blk_count - seg->next_free - 1);
    if (c == NULL) {
        seg->next_free = blk_count;
        return true;
    } else {
        seg->next_free = c - bitmap;
        return false;
    }
#endif
}

static struct NonmovingSegment *pop_active_segment(struct NonmovingAllocator *alloca)
{
    while (true) {
        struct NonmovingSegment *seg = alloca->active;
        if (seg == NULL) {
            return NULL;
        }
        if (cas((StgVolatilePtr) &alloca->active,
                (StgWord) seg,
                (StgWord) seg->link) == (StgWord) seg) {
            return seg;
        }
    }
}

/* Allocate a block in the nonmoving heap. Caller must hold SM_MUTEX. sz is in words */
GNUC_ATTR_HOT
void *nonmovingAllocate(Capability *cap, StgWord sz)
{
    unsigned int log_block_size = log2_ceil(sz * sizeof(StgWord));
    unsigned int block_count = nonmovingBlockCountFromSize(log_block_size);

    // The max we ever allocate is 3276 bytes (anything larger is a large
    // object and not moved) which is covered by allocator 9.
    ASSERT(log_block_size < NONMOVING_ALLOCA0 + NONMOVING_ALLOCA_CNT);

    struct NonmovingAllocator *alloca = nonmovingHeap.allocators[log_block_size - NONMOVING_ALLOCA0];

    // Allocate into current segment
    struct NonmovingSegment *current = alloca->current[cap->no];
    ASSERT(current); // current is never NULL
    void *ret = nonmovingSegmentGetBlock_(current, log_block_size, current->next_free);
    ASSERT(GET_CLOSURE_TAG(ret) == 0); // check alignment

    // Advance the current segment's next_free or allocate a new segment if full
    bool full = advance_next_free(current, block_count);
    if (full) {
        // Current segment is full: update the live data estimate, link it to
        // filled, take an active segment if one exists, otherwise allocate a
        // new segment.

        // Update live data estimate.
        // See Note [Live data accounting in nonmoving collector].
        unsigned int new_blocks = block_count - nonmovingSegmentInfo(current)->next_free_snap;
        unsigned int block_size = 1 << log_block_size;
        atomic_inc(&oldest_gen->live_estimate, new_blocks * block_size / sizeof(W_));

        // push the current segment to the filled list
        nonmovingPushFilledSegment(current);

        // first look for a new segment in the active list
        struct NonmovingSegment *new_current = pop_active_segment(alloca);

        // there are no active segments, allocate new segment
        if (new_current == NULL) {
            new_current = nonmovingAllocSegment(cap->node);
            nonmovingInitSegment(new_current, log_block_size);
        }

        // make it current
        new_current->link = NULL;
        alloca->current[cap->no] = new_current;
    }

    return ret;
}

/* Allocate a nonmovingAllocator */
static struct NonmovingAllocator *alloc_nonmoving_allocator(uint32_t n_caps)
{
    size_t allocator_sz =
        sizeof(struct NonmovingAllocator) +
        sizeof(void*) * n_caps; // current segment pointer for each capability
    struct NonmovingAllocator *alloc =
        stgMallocBytes(allocator_sz, "nonmovingInit");
    memset(alloc, 0, allocator_sz);
    return alloc;
}

static void free_nonmoving_allocator(struct NonmovingAllocator *alloc)
{
    stgFree(alloc);
}

void nonmovingInit(void)
{
    if (! RtsFlags.GcFlags.useNonmoving) return;
#if defined(THREADED_RTS)
    initMutex(&nonmoving_collection_mutex);
    initCondition(&concurrent_coll_finished);
    initMutex(&concurrent_coll_finished_lock);
#endif
    for (unsigned int i = 0; i < NONMOVING_ALLOCA_CNT; i++) {
        nonmovingHeap.allocators[i] = alloc_nonmoving_allocator(n_capabilities);
    }
    nonmovingMarkInitUpdRemSet();
}

// Stop any nonmoving collection in preparation for RTS shutdown.
void nonmovingStop(void)
{
    if (! RtsFlags.GcFlags.useNonmoving) return;
#if defined(THREADED_RTS)
    if (mark_thread) {
        debugTrace(DEBUG_nonmoving_gc,
                   "waiting for nonmoving collector thread to terminate");
        ACQUIRE_LOCK(&concurrent_coll_finished_lock);
        waitCondition(&concurrent_coll_finished, &concurrent_coll_finished_lock);
    }
#endif
}

void nonmovingExit(void)
{
    if (! RtsFlags.GcFlags.useNonmoving) return;

    // First make sure collector is stopped before we tear things down.
    nonmovingStop();

#if defined(THREADED_RTS)
    closeMutex(&concurrent_coll_finished_lock);
    closeCondition(&concurrent_coll_finished);
    closeMutex(&nonmoving_collection_mutex);
#endif

    for (unsigned int i = 0; i < NONMOVING_ALLOCA_CNT; i++) {
        free_nonmoving_allocator(nonmovingHeap.allocators[i]);
    }
}

/*
 * Assumes that no garbage collector or mutator threads are running, so that
 * the nonmoving_allocators can be resized safely.
 *
 * Must hold sm_mutex.
 */
void nonmovingAddCapabilities(uint32_t new_n_caps)
{
    unsigned int old_n_caps = nonmovingHeap.n_caps;
    struct NonmovingAllocator **allocs = nonmovingHeap.allocators;

    for (unsigned int i = 0; i < NONMOVING_ALLOCA_CNT; i++) {
        struct NonmovingAllocator *old = allocs[i];
        allocs[i] = alloc_nonmoving_allocator(new_n_caps);

        // Copy the old state
        allocs[i]->filled = old->filled;
        allocs[i]->active = old->active;
        for (unsigned int j = 0; j < old_n_caps; j++) {
            allocs[i]->current[j] = old->current[j];
        }
        stgFree(old);

        // Initialize current segments for the new capabilities
        for (unsigned int j = old_n_caps; j < new_n_caps; j++) {
            allocs[i]->current[j] = nonmovingAllocSegment(capabilities[j]->node);
            nonmovingInitSegment(allocs[i]->current[j], NONMOVING_ALLOCA0 + i);
            allocs[i]->current[j]->link = NULL;
        }
    }
    nonmovingHeap.n_caps = new_n_caps;
}

void nonmovingClearBitmap(struct NonmovingSegment *seg)
{
    unsigned int n = nonmovingSegmentBlockCount(seg);
    memset(seg->bitmap, 0, n);
}

/* Prepare the heap bitmaps and snapshot metadata for a mark */
static void nonmovingPrepareMark(void)
{
    // See Note [Static objects under the nonmoving collector].
    prev_static_flag = static_flag;
    static_flag =
        static_flag == STATIC_FLAG_A ? STATIC_FLAG_B : STATIC_FLAG_A;

    // Should have been cleared by the last sweep
    ASSERT(nonmovingHeap.sweep_list == NULL);

    nonmovingBumpEpoch();
    for (int alloca_idx = 0; alloca_idx < NONMOVING_ALLOCA_CNT; ++alloca_idx) {
        struct NonmovingAllocator *alloca = nonmovingHeap.allocators[alloca_idx];

        // Update current segments' snapshot pointers
        for (uint32_t cap_n = 0; cap_n < n_capabilities; ++cap_n) {
            struct NonmovingSegment *seg = alloca->current[cap_n];
            nonmovingSegmentInfo(seg)->next_free_snap = seg->next_free;
        }

        // Update filled segments' snapshot pointers and move to sweep_list
        uint32_t n_filled = 0;
        struct NonmovingSegment *const filled = alloca->filled;
        alloca->filled = NULL;
        if (filled) {
            struct NonmovingSegment *seg = filled;
            while (true) {
                // Set snapshot
                nonmovingSegmentInfo(seg)->next_free_snap = seg->next_free;
                n_filled++;
                if (seg->link)
                    seg = seg->link;
                else
                    break;
            }
            // add filled segments to sweep_list
            seg->link = nonmovingHeap.sweep_list;
            nonmovingHeap.sweep_list = filled;
        }

        // N.B. It's not necessary to update snapshot pointers of active segments;
        // they were set after they were swept and haven't seen any allocation
        // since.
    }

    // Clear large object bits of existing large objects
    for (bdescr *bd = nonmoving_large_objects; bd; bd = bd->link) {
        bd->flags &= ~BF_MARKED;
    }

    // Add newly promoted large objects and clear mark bits
    bdescr *next;
    ASSERT(oldest_gen->scavenged_large_objects == NULL);
    for (bdescr *bd = oldest_gen->large_objects; bd; bd = next) {
        next = bd->link;
        bd->flags |= BF_NONMOVING_SWEEPING;
        bd->flags &= ~BF_MARKED;
        dbl_link_onto(bd, &nonmoving_large_objects);
    }
    n_nonmoving_large_blocks += oldest_gen->n_large_blocks;
    oldest_gen->large_objects = NULL;
    oldest_gen->n_large_words = 0;
    oldest_gen->n_large_blocks = 0;
    nonmoving_live_words = 0;

    // Clear compact object mark bits
    for (bdescr *bd = nonmoving_compact_objects; bd; bd = bd->link) {
        bd->flags &= ~BF_MARKED;
    }

    // Move new compact objects from younger generations to nonmoving_compact_objects
    for (bdescr *bd = oldest_gen->compact_objects; bd; bd = next) {
        next = bd->link;
        bd->flags |= BF_NONMOVING_SWEEPING;
        bd->flags &= ~BF_MARKED;
        dbl_link_onto(bd, &nonmoving_compact_objects);
    }
    n_nonmoving_compact_blocks += oldest_gen->n_compact_blocks;
    oldest_gen->n_compact_blocks = 0;
    oldest_gen->compact_objects = NULL;
    // TODO (osa): what about "in import" stuff??



#if defined(DEBUG)
    debug_caf_list_snapshot = debug_caf_list;
    debug_caf_list = (StgIndStatic*)END_OF_CAF_LIST;
#endif
}

// Mark weak pointers in the non-moving heap. They'll either end up in
// dead_weak_ptr_list or stay in weak_ptr_list. Either way they need to be kept
// during sweep. See `MarkWeak.c:markWeakPtrList` for the moving heap variant
// of this.
static void nonmovingMarkWeakPtrList(MarkQueue *mark_queue, StgWeak *dead_weak_ptr_list)
{
    for (StgWeak *w = oldest_gen->weak_ptr_list; w; w = w->link) {
        markQueuePushClosure_(mark_queue, (StgClosure*)w);
        // Do not mark finalizers and values here, those fields will be marked
        // in `nonmovingMarkDeadWeaks` (for dead weaks) or
        // `nonmovingTidyWeaks` (for live weaks)
    }

    // We need to mark dead_weak_ptr_list too. This is subtle:
    //
    // - By the beginning of this GC we evacuated all weaks to the non-moving
    //   heap (in `markWeakPtrList`)
    //
    // - During the scavenging of the moving heap we discovered that some of
    //   those weaks are dead and moved them to `dead_weak_ptr_list`. Note that
    //   because of the fact above _all weaks_ are in the non-moving heap at
    //   this point.
    //
    // - So, to be able to traverse `dead_weak_ptr_list` and run finalizers we
    //   need to mark it.
    for (StgWeak *w = dead_weak_ptr_list; w; w = w->link) {
        markQueuePushClosure_(mark_queue, (StgClosure*)w);
        nonmovingMarkDeadWeak(mark_queue, w);
    }
}

void nonmovingCollect(StgWeak **dead_weaks, StgTSO **resurrected_threads)
{
#if defined(THREADED_RTS)
    // We can't start a new collection until the old one has finished
    // We also don't run in final GC
    if (concurrent_coll_running || sched_state > SCHED_RUNNING) {
        return;
    }
#endif

    trace(TRACE_nonmoving_gc, "Starting nonmoving GC preparation");
    resizeGenerations();

    nonmovingPrepareMark();

    // N.B. These should have been cleared at the end of the last sweep.
    ASSERT(nonmoving_marked_large_objects == NULL);
    ASSERT(n_nonmoving_marked_large_blocks == 0);
    ASSERT(nonmoving_marked_compact_objects == NULL);
    ASSERT(n_nonmoving_marked_compact_blocks == 0);

    MarkQueue *mark_queue = stgMallocBytes(sizeof(MarkQueue), "mark queue");
    initMarkQueue(mark_queue);
    current_mark_queue = mark_queue;

    // Mark roots
    trace(TRACE_nonmoving_gc, "Marking roots for nonmoving GC");
    markCAFs((evac_fn)markQueueAddRoot, mark_queue);
    for (unsigned int n = 0; n < n_capabilities; ++n) {
        markCapability((evac_fn)markQueueAddRoot, mark_queue,
                capabilities[n], true/*don't mark sparks*/);
    }
    markScheduler((evac_fn)markQueueAddRoot, mark_queue);
    nonmovingMarkWeakPtrList(mark_queue, *dead_weaks);
    markStablePtrTable((evac_fn)markQueueAddRoot, mark_queue);

    // Mark threads resurrected during moving heap scavenging
    for (StgTSO *tso = *resurrected_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
        markQueuePushClosure_(mark_queue, (StgClosure*)tso);
    }
    trace(TRACE_nonmoving_gc, "Finished marking roots for nonmoving GC");

    // Roots marked, mark threads and weak pointers

    // At this point all threads are moved to threads list (from old_threads)
    // and all weaks are moved to weak_ptr_list (from old_weak_ptr_list) by
    // the previous scavenge step, so we need to move them to "old" lists
    // again.

    // Fine to override old_threads because any live or resurrected threads are
    // moved to threads or resurrected_threads lists.
    ASSERT(oldest_gen->old_threads == END_TSO_QUEUE);
    ASSERT(nonmoving_old_threads == END_TSO_QUEUE);
    nonmoving_old_threads = oldest_gen->threads;
    oldest_gen->threads = END_TSO_QUEUE;

    // Make sure we don't lose any weak ptrs here. Weaks in old_weak_ptr_list
    // will either be moved to `dead_weaks` (if dead) or `weak_ptr_list` (if
    // alive).
    ASSERT(oldest_gen->old_weak_ptr_list == NULL);
    ASSERT(nonmoving_old_weak_ptr_list == NULL);
    nonmoving_old_weak_ptr_list = oldest_gen->weak_ptr_list;
    oldest_gen->weak_ptr_list = NULL;
    trace(TRACE_nonmoving_gc, "Finished nonmoving GC preparation");

    // We are now safe to start concurrent marking

    // Note that in concurrent mark we can't use dead_weaks and
    // resurrected_threads from the preparation to add new weaks and threads as
    // that would cause races between minor collection and mark. So we only pass
    // those lists to the mark function in the sequential case. In the
    // concurrent case we allocate fresh lists.

#if defined(THREADED_RTS)
    // If we're interrupting or shutting down, do not let this capability go and
    // run a STW collection. Reason: we won't be able to acquire this capability
    // again for the sync if we let it go, because it'll immediately start doing
    // a major GC, because that's what we do when exiting scheduler (see
    // exitScheduler()).
    if (sched_state == SCHED_RUNNING) {
        concurrent_coll_running = true;
        nonmoving_write_barrier_enabled = true;
        debugTrace(DEBUG_nonmoving_gc, "Starting concurrent mark thread");
        if (createOSThread(&mark_thread, "non-moving mark thread",
                           nonmovingConcurrentMark, mark_queue) != 0) {
            barf("nonmovingCollect: failed to spawn mark thread: %s", strerror(errno));
        }
    } else {
        nonmovingConcurrentMark(mark_queue);
    }
#else
    // Use the weak and thread lists from the preparation for any new weaks and
    // threads found to be dead in mark.
    nonmovingMark_(mark_queue, dead_weaks, resurrected_threads);
#endif
}

/* Mark the mark queue, threads, and weak pointers until no more weaks have
 * been resuscitated.
 */
static void nonmovingMarkThreadsWeaks(MarkQueue *mark_queue)
{
    while (true) {
        // Propagate marks
        nonmovingMark(mark_queue);

        // Tidy threads and weaks
        nonmovingTidyThreads();

        if (! nonmovingTidyWeaks(mark_queue))
            return;
    }
}

#if defined(THREADED_RTS)
static void* nonmovingConcurrentMark(void *data)
{
    MarkQueue *mark_queue = (MarkQueue*)data;
    StgWeak *dead_weaks = NULL;
    StgTSO *resurrected_threads = (StgTSO*)&stg_END_TSO_QUEUE_closure;
    nonmovingMark_(mark_queue, &dead_weaks, &resurrected_threads);
    return NULL;
}

// TODO: Not sure where to put this function.
// Append w2 to the end of w1.
static void appendWeakList( StgWeak **w1, StgWeak *w2 )
{
    while (*w1) {
        w1 = &(*w1)->link;
    }
    *w1 = w2;
}
#endif

static void nonmovingMark_(MarkQueue *mark_queue, StgWeak **dead_weaks, StgTSO **resurrected_threads)
{
    ACQUIRE_LOCK(&nonmoving_collection_mutex);
    debugTrace(DEBUG_nonmoving_gc, "Starting mark...");

    // Do concurrent marking; most of the heap will get marked here.
    nonmovingMarkThreadsWeaks(mark_queue);

#if defined(THREADED_RTS)
    Task *task = newBoundTask();

    // If at this point we've decided to exit then just return
    if (sched_state > SCHED_RUNNING) {
        // Note that we break our invariants here and leave segments in
        // nonmovingHeap.sweep_list, don't free nonmoving_large_objects etc.
        // However because we won't be running mark-sweep in the final GC this
        // is OK.

        // This is an RTS shutdown, so we need to move our copy (snapshot) of
        // weaks (nonmoving_old_weak_ptr_list and nonmoving_weak_ptr_list) to
        // oldest_gen->weak_ptr_list to be able to run C finalizers in hs_exit_.
        // Note that there may be more weaks added to oldest_gen->weak_ptr_list
        // since we started mark, so we need to append our list to the tail of
        // oldest_gen->weak_ptr_list.
        appendWeakList(&nonmoving_old_weak_ptr_list, nonmoving_weak_ptr_list);
        appendWeakList(&oldest_gen->weak_ptr_list, nonmoving_old_weak_ptr_list);
        // These lists won't be used again so this is not necessary, but still
        nonmoving_old_weak_ptr_list = NULL;
        nonmoving_weak_ptr_list = NULL;

        goto finish;
    }

    // We're still running, request a sync
    nonmovingBeginFlush(task);

    bool all_caps_syncd;
    do {
        all_caps_syncd = nonmovingWaitForFlush();
        nonmovingMarkThreadsWeaks(mark_queue);
    } while (!all_caps_syncd);
#endif

    nonmovingResurrectThreads(mark_queue, resurrected_threads);

    // No more resurrecting threads after this point

    // Do last marking of weak pointers
    while (true) {
        // Propagate marks
        nonmovingMark(mark_queue);

        if (!nonmovingTidyWeaks(mark_queue))
            break;
    }

    nonmovingMarkDeadWeaks(mark_queue, dead_weaks);

    // Propagate marks
    nonmovingMark(mark_queue);

    // Now remove all dead objects from the mut_list to ensure that a younger
    // generation collection doesn't attempt to look at them after we've swept.
    nonmovingSweepMutLists();

    debugTrace(DEBUG_nonmoving_gc,
               "Done marking, resurrecting threads before releasing capabilities");


    // Schedule finalizers and resurrect threads
#if defined(THREADED_RTS)
    // Just pick a random capability. Not sure if this is a good idea -- we use
    // only one capability for all finalizers.
    scheduleFinalizers(capabilities[0], *dead_weaks);
    // Note that this mutates heap and causes running write barriers.
    // See Note [Unintentional marking in resurrectThreads] in NonMovingMark.c
    // for how we deal with this.
    resurrectThreads(*resurrected_threads);
#endif

#if defined(DEBUG)
    // Zap CAFs that we will sweep
    nonmovingGcCafs();
#endif

    ASSERT(mark_queue->top->head == 0);
    ASSERT(mark_queue->blocks->link == NULL);

    // Update oldest_gen thread and weak lists
    // Note that we need to append these lists as a concurrent minor GC may have
    // added stuff to them while we're doing mark-sweep concurrently
    {
        StgTSO **threads = &oldest_gen->threads;
        while (*threads != END_TSO_QUEUE) {
            threads = &(*threads)->global_link;
        }
        *threads = nonmoving_threads;
        nonmoving_threads = END_TSO_QUEUE;
        nonmoving_old_threads = END_TSO_QUEUE;
    }

    {
        StgWeak **weaks = &oldest_gen->weak_ptr_list;
        while (*weaks) {
            weaks = &(*weaks)->link;
        }
        *weaks = nonmoving_weak_ptr_list;
        nonmoving_weak_ptr_list = NULL;
        nonmoving_old_weak_ptr_list = NULL;
    }

    // Everything has been marked; allow the mutators to proceed
#if defined(THREADED_RTS)
    nonmoving_write_barrier_enabled = false;
    nonmovingFinishFlush(task);
#endif

    current_mark_queue = NULL;
    freeMarkQueue(mark_queue);
    stgFree(mark_queue);

    oldest_gen->live_estimate = nonmoving_live_words;
    oldest_gen->n_old_blocks = 0;
    resizeGenerations();

    /****************************************************
     * Sweep
     ****************************************************/

    traceConcSweepBegin();

    // Because we can't mark large object blocks (no room for mark bit) we
    // collect them in a map in mark_queue and we pass it here to sweep large
    // objects
    nonmovingSweepLargeObjects();
    nonmovingSweepCompactObjects();
    nonmovingSweepStableNameTable();

    nonmovingSweep();
    ASSERT(nonmovingHeap.sweep_list == NULL);
    debugTrace(DEBUG_nonmoving_gc, "Finished sweeping.");
    traceConcSweepEnd();
#if defined(DEBUG)
    if (RtsFlags.DebugFlags.nonmoving_gc)
        nonmovingPrintAllocatorCensus();
#endif

    // TODO: Remainder of things done by GarbageCollect (update stats)

#if defined(THREADED_RTS)
finish:
    boundTaskExiting(task);

    // We are done...
    mark_thread = 0;

    // Signal that the concurrent collection is finished, allowing the next
    // non-moving collection to proceed
    concurrent_coll_running = false;
    signalCondition(&concurrent_coll_finished);
    RELEASE_LOCK(&nonmoving_collection_mutex);
#endif
}

#if defined(DEBUG)

// Use this with caution: this doesn't work correctly during scavenge phase
// when we're doing parallel scavenging. Use it in mark phase or later (where
// we don't allocate anymore).
void assert_in_nonmoving_heap(StgPtr p)
{
    if (!HEAP_ALLOCED_GC(p))
        return;

    bdescr *bd = Bdescr(p);
    if (bd->flags & BF_LARGE) {
        // It should be in a capability (if it's not filled yet) or in the non-moving heap
        for (uint32_t cap = 0; cap < n_capabilities; ++cap) {
            if (bd == capabilities[cap]->pinned_object_block) {
                return;
            }
        }
        ASSERT(bd->flags & BF_NONMOVING);
        return;
    }

    // Search snapshot segments
    for (struct NonmovingSegment *seg = nonmovingHeap.sweep_list; seg; seg = seg->link) {
        if (p >= (P_)seg && p < (((P_)seg) + NONMOVING_SEGMENT_SIZE_W)) {
            return;
        }
    }

    for (int alloca_idx = 0; alloca_idx < NONMOVING_ALLOCA_CNT; ++alloca_idx) {
        struct NonmovingAllocator *alloca = nonmovingHeap.allocators[alloca_idx];
        // Search current segments
        for (uint32_t cap_idx = 0; cap_idx < n_capabilities; ++cap_idx) {
            struct NonmovingSegment *seg = alloca->current[cap_idx];
            if (p >= (P_)seg && p < (((P_)seg) + NONMOVING_SEGMENT_SIZE_W)) {
                return;
            }
        }

        // Search active segments
        int seg_idx = 0;
        struct NonmovingSegment *seg = alloca->active;
        while (seg) {
            if (p >= (P_)seg && p < (((P_)seg) + NONMOVING_SEGMENT_SIZE_W)) {
                return;
            }
            seg_idx++;
            seg = seg->link;
        }

        // Search filled segments
        seg_idx = 0;
        seg = alloca->filled;
        while (seg) {
            if (p >= (P_)seg && p < (((P_)seg) + NONMOVING_SEGMENT_SIZE_W)) {
                return;
            }
            seg_idx++;
            seg = seg->link;
        }
    }

    // We don't search free segments as they're unused

    barf("%p is not in nonmoving heap\n", (void*)p);
}

void nonmovingPrintSegment(struct NonmovingSegment *seg)
{
    int num_blocks = nonmovingSegmentBlockCount(seg);
    uint8_t log_block_size = nonmovingSegmentLogBlockSize(seg);

    debugBelch("Segment with %d blocks of size 2^%d (%d bytes, %u words, scan: %p)\n",
               num_blocks,
               log_block_size,
               1 << log_block_size,
               (unsigned int) ROUNDUP_BYTES_TO_WDS(1 << log_block_size),
               (void*)Bdescr((P_)seg)->u.scan);

    for (nonmoving_block_idx p_idx = 0; p_idx < seg->next_free; ++p_idx) {
        StgClosure *p = (StgClosure*)nonmovingSegmentGetBlock(seg, p_idx);
        if (nonmovingGetMark(seg, p_idx) != 0) {
            debugBelch("%d (%p)* :\t", p_idx, (void*)p);
        } else {
            debugBelch("%d (%p)  :\t", p_idx, (void*)p);
        }
        printClosure(p);
    }

    debugBelch("End of segment\n\n");
}

void nonmovingPrintAllocator(struct NonmovingAllocator *alloc)
{
    debugBelch("Allocator at %p\n", (void*)alloc);
    debugBelch("Filled segments:\n");
    for (struct NonmovingSegment *seg = alloc->filled; seg != NULL; seg = seg->link) {
        debugBelch("%p ", (void*)seg);
    }
    debugBelch("\nActive segments:\n");
    for (struct NonmovingSegment *seg = alloc->active; seg != NULL; seg = seg->link) {
        debugBelch("%p ", (void*)seg);
    }
    debugBelch("\nCurrent segments:\n");
    for (uint32_t i = 0; i < n_capabilities; ++i) {
        debugBelch("%p ", alloc->current[i]);
    }
    debugBelch("\n");
}

void locate_object(P_ obj)
{
    // Search allocators
    for (int alloca_idx = 0; alloca_idx < NONMOVING_ALLOCA_CNT; ++alloca_idx) {
        struct NonmovingAllocator *alloca = nonmovingHeap.allocators[alloca_idx];
        for (uint32_t cap = 0; cap < n_capabilities; ++cap) {
            struct NonmovingSegment *seg = alloca->current[cap];
            if (obj >= (P_)seg && obj < (((P_)seg) + NONMOVING_SEGMENT_SIZE_W)) {
                debugBelch("%p is in current segment of capability %d of allocator %d at %p\n", obj, cap, alloca_idx, (void*)seg);
                return;
            }
        }
        int seg_idx = 0;
        struct NonmovingSegment *seg = alloca->active;
        while (seg) {
            if (obj >= (P_)seg && obj < (((P_)seg) + NONMOVING_SEGMENT_SIZE_W)) {
                debugBelch("%p is in active segment %d of allocator %d at %p\n", obj, seg_idx, alloca_idx, (void*)seg);
                return;
            }
            seg_idx++;
            seg = seg->link;
        }

        seg_idx = 0;
        seg = alloca->filled;
        while (seg) {
            if (obj >= (P_)seg && obj < (((P_)seg) + NONMOVING_SEGMENT_SIZE_W)) {
                debugBelch("%p is in filled segment %d of allocator %d at %p\n", obj, seg_idx, alloca_idx, (void*)seg);
                return;
            }
            seg_idx++;
            seg = seg->link;
        }
    }

    struct NonmovingSegment *seg = nonmovingHeap.free;
    int seg_idx = 0;
    while (seg) {
        if (obj >= (P_)seg && obj < (((P_)seg) + NONMOVING_SEGMENT_SIZE_W)) {
            debugBelch("%p is in free segment %d at %p\n", obj, seg_idx, (void*)seg);
            return;
        }
        seg_idx++;
        seg = seg->link;
    }

    // Search nurseries
    for (uint32_t nursery_idx = 0; nursery_idx < n_nurseries; ++nursery_idx) {
        for (bdescr* nursery_block = nurseries[nursery_idx].blocks; nursery_block; nursery_block = nursery_block->link) {
            if (obj >= nursery_block->start && obj <= nursery_block->start + nursery_block->blocks*BLOCK_SIZE_W) {
                debugBelch("%p is in nursery %d\n", obj, nursery_idx);
                return;
            }
        }
    }

    // Search generations
    for (uint32_t g = 0; g < RtsFlags.GcFlags.generations - 1; ++g) {
        generation *gen = &generations[g];
        for (bdescr *blk = gen->blocks; blk; blk = blk->link) {
            if (obj >= blk->start && obj < blk->free) {
                debugBelch("%p is in generation %" FMT_Word32 " blocks\n", obj, g);
                return;
            }
        }
        for (bdescr *blk = gen->old_blocks; blk; blk = blk->link) {
            if (obj >= blk->start && obj < blk->free) {
                debugBelch("%p is in generation %" FMT_Word32 " old blocks\n", obj, g);
                return;
            }
        }
    }

    // Search large objects
    for (uint32_t g = 0; g < RtsFlags.GcFlags.generations - 1; ++g) {
        generation *gen = &generations[g];
        for (bdescr *large_block = gen->large_objects; large_block; large_block = large_block->link) {
            if ((P_)large_block->start == obj) {
                debugBelch("%p is in large blocks of generation %d\n", obj, g);
                return;
            }
        }
    }

    for (bdescr *large_block = nonmoving_large_objects; large_block; large_block = large_block->link) {
        if ((P_)large_block->start == obj) {
            debugBelch("%p is in nonmoving_large_objects\n", obj);
            return;
        }
    }

    for (bdescr *large_block = nonmoving_marked_large_objects; large_block; large_block = large_block->link) {
        if ((P_)large_block->start == obj) {
            debugBelch("%p is in nonmoving_marked_large_objects\n", obj);
            return;
        }
    }

    // Search workspaces FIXME only works in non-threaded runtime
#if !defined(THREADED_RTS)
    for (uint32_t g = 0; g < RtsFlags.GcFlags.generations - 1; ++ g) {
        gen_workspace *ws = &gct->gens[g];
        for (bdescr *blk = ws->todo_bd; blk; blk = blk->link) {
            if (obj >= blk->start && obj < blk->free) {
                debugBelch("%p is in generation %" FMT_Word32 " todo bds\n", obj, g);
                return;
            }
        }
        for (bdescr *blk = ws->scavd_list; blk; blk = blk->link) {
            if (obj >= blk->start && obj < blk->free) {
                debugBelch("%p is in generation %" FMT_Word32 " scavd bds\n", obj, g);
                return;
            }
        }
        for (bdescr *blk = ws->todo_large_objects; blk; blk = blk->link) {
            if (obj >= blk->start && obj < blk->free) {
                debugBelch("%p is in generation %" FMT_Word32 " todo large bds\n", obj, g);
                return;
            }
        }
    }
#endif
}

void nonmovingPrintSweepList()
{
    debugBelch("==== SWEEP LIST =====\n");
    int i = 0;
    for (struct NonmovingSegment *seg = nonmovingHeap.sweep_list; seg; seg = seg->link) {
        debugBelch("%d: %p\n", i++, (void*)seg);
    }
    debugBelch("= END OF SWEEP LIST =\n");
}

void check_in_mut_list(StgClosure *p)
{
    for (uint32_t cap_n = 0; cap_n < n_capabilities; ++cap_n) {
        for (bdescr *bd = capabilities[cap_n]->mut_lists[oldest_gen->no]; bd; bd = bd->link) {
            for (StgPtr q = bd->start; q < bd->free; ++q) {
                if (*((StgPtr**)q) == (StgPtr*)p) {
                    debugBelch("Object is in mut list of cap %d: %p\n", cap_n, capabilities[cap_n]->mut_lists[oldest_gen->no]);
                    return;
                }
            }
        }
    }

    debugBelch("Object is not in a mut list\n");
}

void print_block_list(bdescr* bd)
{
    while (bd) {
        debugBelch("%p, ", (void*)bd);
        bd = bd->link;
    }
    debugBelch("\n");
}

void print_thread_list(StgTSO* tso)
{
    while (tso != END_TSO_QUEUE) {
        printClosure((StgClosure*)tso);
        tso = tso->global_link;
    }
}

#endif