/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2013-
 *
 * Check whether dynamically-loaded object code can be safely
 * unloaded, by searching for references to it from the heap and RTS
 * data structures.
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "RtsUtils.h"
#include "Hash.h"
#include "LinkerInternals.h"
#include "CheckUnload.h"
#include "sm/Storage.h"
#include "sm/GCThread.h"

//
// Code that we unload may be referenced from:
//   - info pointers in heap objects and stack frames
//   - pointers to static objects from the heap
//   - StablePtrs to static objects
//
// We can find live static objects after a major GC, so we don't have
// to look at every closure pointer in the heap.  However, we do have
// to look at every info pointer.  So this is like a heap census
// traversal: we look at the header of every object, but not its
// contents.
//
// On the assumption that there aren't many different info pointers in
// a typical heap, we insert addresses into a hash table.  The
// first time we see an address, we check it against the pending
// unloadable objects and if it lies within any of them, we mark that
// object as referenced so that it won't get unloaded in this round.
//
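
// For illustration only (an assumed usage sketch, not part of this
// file): a client of the RTS linker drives this machinery roughly as
// follows, where 'path' stands for some object file name:
//
//     loadObj(path);       // map the object file and its symbols
//     resolveObjs();       // perform relocations
//     /* ... call into the loaded code ... */
//     unloadObj(path);     // put the ObjectCode on unloaded_objects
//     performMajorGC();    // checkUnload() runs as part of the GC and
//                          // frees the object once nothing refers to it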

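// checkAddress: record that we have seen 'addr'.  The first time an
// address is seen, we do a linear scan over the pending unloadable
// objects; if the address falls inside one of their mapped images, we
// mark that object as referenced, vetoing its unload for this round.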
static void checkAddress (HashTable *addrs, void *addr)
{
    ObjectCode *oc;

    if (!lookupHashTable(addrs, (W_)addr)) {
        insertHashTable(addrs, (W_)addr, addr);

        for (oc = unloaded_objects; oc; oc = oc->next) {
            if ((W_)addr >= (W_)oc->image &&
                (W_)addr <  (W_)oc->image + oc->fileSize) {
                oc->referenced = 1;
                break;
            }
        }
    }
}

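// searchStackChunk: walk the frames of a stack chunk between 'sp' and
// 'stack_end', checking the info pointer of each RET_SMALL or RET_BIG
// frame.  Those are the only frame types whose info tables can live in
// compiled (and hence unloadable) code; the other frame kinds use info
// tables owned by the RTS itself.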
static void searchStackChunk (HashTable *addrs, StgPtr sp, StgPtr stack_end)
{
    StgPtr p;
    const StgRetInfoTable *info;

    p = sp;
    while (p < stack_end) {
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {
        case RET_SMALL:
        case RET_BIG:
            checkAddress(addrs, (void*)info);
            break;

        default:
            break;
        }

        p += stack_frame_sizeW((StgClosure*)p);
    }
}


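// searchHeapBlocks: linearly scan every object in a chain of blocks,
// using each object's info table to compute its size so we can step to
// the next one.  Objects flagged 'prim' below have well-known info
// tables supplied by the RTS, so only the remaining objects have their
// info pointer passed to checkAddress(); stacks and AP_STACKs also get
// their stack frames searched.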
static void searchHeapBlocks (HashTable *addrs, bdescr *bd)
{
    StgPtr p;
    StgInfoTable *info;
    nat size;
    rtsBool prim;

    for (; bd != NULL; bd = bd->link) {

        if (bd->flags & BF_PINNED) {
            // Assume that objects in PINNED blocks cannot refer to
            // unloadable code: pinned blocks contain only ARR_WORDS
            // (byte array) objects, which hold no pointers.
            continue;
        }

        p = bd->start;
        while (p < bd->free) {
            info = get_itbl((StgClosure *)p);
            prim = rtsFalse;

            switch (info->type) {

            case THUNK:
                size = thunk_sizeW_fromITBL(info);
                break;

            case THUNK_1_1:
            case THUNK_0_2:
            case THUNK_2_0:
                size = sizeofW(StgThunkHeader) + 2;
                break;

            case THUNK_1_0:
            case THUNK_0_1:
            case THUNK_SELECTOR:
                size = sizeofW(StgThunkHeader) + 1;
                break;

            case CONSTR:
            case FUN:
            case FUN_1_0:
            case FUN_0_1:
            case FUN_1_1:
            case FUN_0_2:
            case FUN_2_0:
            case CONSTR_1_0:
            case CONSTR_0_1:
            case CONSTR_1_1:
            case CONSTR_0_2:
            case CONSTR_2_0:
                size = sizeW_fromITBL(info);
                break;

            case IND_PERM:
            case BLACKHOLE:
            case BLOCKING_QUEUE:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case IND:
                // Special case/Delicate Hack: INDs don't normally
                // appear, since we're doing this heap census right
                // after GC.  However, GarbageCollect() also does
                // resurrectThreads(), which can update some
                // blackholes when it calls raiseAsync() on the
                // resurrected threads.  So we know that any IND will
                // be the size of a BLACKHOLE.
                prim = rtsTrue;
                size = BLACKHOLE_sizeW();
                break;

            case BCO:
                prim = rtsTrue;
                size = bco_sizeW((StgBCO *)p);
                break;

            case MVAR_CLEAN:
            case MVAR_DIRTY:
            case TVAR:
            case WEAK:
            case PRIM:
            case MUT_PRIM:
            case MUT_VAR_CLEAN:
            case MUT_VAR_DIRTY:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case AP:
                prim = rtsTrue;
                size = ap_sizeW((StgAP *)p);
                break;

            case PAP:
                prim = rtsTrue;
                size = pap_sizeW((StgPAP *)p);
                break;

            case AP_STACK:
            {
                StgAP_STACK *ap = (StgAP_STACK *)p;
                prim = rtsTrue;
                size = ap_stack_sizeW(ap);
                searchStackChunk(addrs, (StgPtr)ap->payload,
                                 (StgPtr)ap->payload + ap->size);
                break;
            }

            case ARR_WORDS:
                prim = rtsTrue;
                size = arr_words_sizeW((StgArrWords*)p);
                break;

            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
                break;

            case SMALL_MUT_ARR_PTRS_CLEAN:
            case SMALL_MUT_ARR_PTRS_DIRTY:
            case SMALL_MUT_ARR_PTRS_FROZEN:
            case SMALL_MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)p);
                break;

            case TSO:
                prim = rtsTrue;
                size = sizeofW(StgTSO);
                break;

            case STACK: {
                StgStack *stack = (StgStack*)p;
                prim = rtsTrue;
                searchStackChunk(addrs, stack->sp,
                                 stack->stack + stack->stack_size);
                size = stack_sizeW(stack);
                break;
            }

            case TREC_CHUNK:
                prim = rtsTrue;
                size = sizeofW(StgTRecChunk);
                break;

            default:
                barf("searchHeapBlocks, unknown object: %d", info->type);
            }

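            // Only objects whose info table could have been generated
            // by compiled code can refer to an unloadable image;
            // 'prim' objects carry RTS-owned info tables, so skip them.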
            if (!prim) {
                checkAddress(addrs, info);
            }

            p += size;
        }
    }
}
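
// A sketch of the assumed call site (for illustration, not part of
// this file): during a major collection, once the static objects have
// been scavenged, the GC hands us its list:
//
//     if (major_gc) {
//         checkUnload(gct->scavenged_static_objects);
//     }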

//
// Check whether we can unload any object code.  This is called at the
// appropriate point during a GC, where all the heap data is nice and
// packed together and we have a linked list of the static objects.
//
// The check involves a complete heap traversal, but you only pay for
// this (a) when you have called unloadObj(), and (b) at a major GC,
// which is much more expensive than the traversal we're doing here.
//
void checkUnload (StgClosure *static_objects)
{
  nat g, n;
  HashTable *addrs;
  StgClosure* p;
  const StgInfoTable *info;
  ObjectCode *oc, *prev, *next;
  gen_workspace *ws;
  StgClosure* link;

  if (unloaded_objects == NULL) return;

  ACQUIRE_LOCK(&linker_unloaded_mutex);

  // Mark every unloadable object as unreferenced initially
  for (oc = unloaded_objects; oc; oc = oc->next) {
      IF_DEBUG(linker, debugBelch("Checking whether to unload %" PATH_FMT "\n",
                                  oc->fileName));
      oc->referenced = rtsFalse;
  }

  addrs = allocHashTable();

  for (p = static_objects; p != END_OF_STATIC_LIST; p = link) {
      checkAddress(addrs, p);
      info = get_itbl(p);
      link = *STATIC_LINK(info, p);
  }

  // CAFs on revertible_caf_list are not on static_objects
  for (p = (StgClosure*)revertible_caf_list;
       p != END_OF_STATIC_LIST;
       p = ((StgIndStatic *)p)->static_link) {
      checkAddress(addrs, p);
  }

  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      searchHeapBlocks (addrs, generations[g].blocks);
      searchHeapBlocks (addrs, generations[g].large_objects);

      for (n = 0; n < n_capabilities; n++) {
          ws = &gc_threads[n]->gens[g];
          searchHeapBlocks(addrs, ws->todo_bd);
          searchHeapBlocks(addrs, ws->part_list);
          searchHeapBlocks(addrs, ws->scavd_list);
      }
  }

  // Look through the unloadable objects, and any object that is still
  // marked as unreferenced can be physically unloaded, because we
  // have no references to it.
  prev = NULL;
  for (oc = unloaded_objects; oc; oc = next) {
      next = oc->next;
      if (oc->referenced == 0) {
          if (prev == NULL) {
              unloaded_objects = oc->next;
          } else {
              prev->next = oc->next;
          }
          IF_DEBUG(linker, debugBelch("Unloading object file %" PATH_FMT "\n",
                                      oc->fileName));
          freeObjectCode(oc);
      } else {
          IF_DEBUG(linker, debugBelch("Object file still in use: %"
                                      PATH_FMT "\n", oc->fileName));
          prev = oc;
      }
  }

  freeHashTable(addrs, NULL);

  RELEASE_LOCK(&linker_unloaded_mutex);
}