1 /*
2  * Extension to GENCGC which provides for pages of objects
3  * that are static in placement but subject to reclamation.
4  */
5 
6 /*
7  * This software is part of the SBCL system. See the README file for
8  * more information.
9  *
10  * This software is derived from the CMU CL system, which was
11  * written at Carnegie Mellon University and released into the
12  * public domain. The software is in the public domain and is
13  * provided with absolutely no warranty. See the COPYING and CREDITS
14  * files for more information.
15  */
16 
17 /*
18  * TODO:
19  *  1. Space accounting (GET-BYTES-CONSED etc)
20  *  2. Heuristic for auto-trigger. (Can't yet because no space accounting)
21  *     Currently happens with regular GC trigger mechanism.
22  *  3. Specify space size on startup
23  */
24 
25 // Work around a bug in some llvm/clang versions affecting the memcpy
26 // call in defrag_immobile_space:
27 //
28 // When compiled with _FORTIFY_SOURCE nonzero, as seems to be the
29 // default, memcpy is a macro that expands to
30 // __builtin_memcpy_chk(dst, source, size, __builtin_object_size(...)).
31 //
32 // Now usually if the compiler knows that it does not know
33 // __builtin_object_size for the source of the copy, the
34 // __builtin_memcpy_chk call becomes plain old memcpy. But in the
35 // buggy case, the compiler is convinced that it does know the
36 // size. This shows up clearly in the disassembly, where the library
37 // routine receives a source size that was erroneously determined to
38 // be a compile-time constant 0. Thus the assertion failure is that
39 // you are reading from a range with 0 bytes in it.
40 //
41 // Defining _FORTIFY_LEVEL 0 disables the above macro and thus works
42 // around the problem. Since it is unclear which clang versions are
43 // affected, only apply the workaround for the known-bad version.
44 #if (defined(__clang__) && (__clang_major__ == 6) && (__clang_minor__ == 0))
45 #define _FORTIFY_SOURCE 0
46 #endif
47 
48 #include "gc.h"
49 #include "gc-internal.h"
50 #include "genesis/vector.h"
51 
52 #include <stdlib.h>
53 #include <stdio.h>
54 
// Page geometry: the fixed-size-object subspace occupies the pages below
// FIRST_VARYOBJ_PAGE; variable-sized objects live on the pages at and
// above it. All sizes are per IMMOBILE_CARD_BYTES page.
#define FIRST_VARYOBJ_PAGE (IMMOBILE_FIXEDOBJ_SUBSPACE_SIZE/(int)IMMOBILE_CARD_BYTES)
#define WORDS_PER_PAGE ((int)IMMOBILE_CARD_BYTES/N_WORD_BYTES)
#define DOUBLEWORDS_PER_PAGE (WORDS_PER_PAGE/2)

// Compile-time switches for debug logging and page-generation verification.
#undef DEBUG
#undef VERIFY_PAGE_GENS

#ifdef DEBUG
#  define dprintf(arg) fprintf arg
FILE * logfile;
#else
#  define dprintf(arg)
#endif

// Highest in-use page index of each subspace, recomputed by
// update_immobile_nursery_bits() at the start of each GC cycle.
low_page_index_t max_used_fixedobj_page, max_used_varyobj_page;
70 
// This table is for objects fixed in size, as opposed to variable-sized.
// (Immobile objects are naturally fixed in placement)
struct fixedobj_page { // 12 bytes per page
    union immobile_page_attr {
      int packed; // all attributes as one word, for atomic compare-and-swap
      struct {
        unsigned char flags;
        /* space per object in Lisp words. Can exceed obj_size
           to align on a larger boundary */
        unsigned char obj_align;
        unsigned char obj_size; /* in Lisp words, incl. header */
        /* Which generations have data on this page */
        unsigned char gens_; // a bitmap
      } parts;
    } attr;
    int free_index; // index is in bytes. 4 bytes
    short int prior_gc_free_word_index; // index is in words. 2 bytes
    /* page index of next page with same attributes */
    short int page_link; // 2 bytes
} *fixedobj_pages;
91 
// Circular work queue of newly-greyed objects awaiting re-scan.
unsigned int* immobile_scav_queue;
int immobile_scav_queue_head;
// Number of items enqueued; can exceed QCAPACITY on overflow.
// If overflowed, the queue is unusable until reset.
unsigned int immobile_scav_queue_count;
// Queue capacity: one immobile card holds this many 32-bit entries.
#define QCAPACITY (IMMOBILE_CARD_BYTES/sizeof(int))

// Shorthand accessor for the generation bitmap of a fixedobj page.
#define gens attr.parts.gens_

// These are the high 2 bits of 'flags'
#define WRITE_PROTECT         0x80
#define WRITE_PROTECT_CLEARED 0x40

// Packing and unpacking attributes
// the low two flag bits are for write-protect status
#define MAKE_ATTR(spacing,size,flags) (((spacing)<<8)|((size)<<16)|flags)
#define OBJ_SPACING(attr) ((attr>>8) & 0xFF)

// Ignore the write-protect bits and the generations when comparing attributes
#define ATTRIBUTES_MATCH_P(page_attr,specified_attr) \
  ((page_attr & 0xFFFF3F) == specified_attr)
#define SET_WP_FLAG(index,flag) \
  fixedobj_pages[index].attr.parts.flags = (fixedobj_pages[index].attr.parts.flags & 0x3F) | flag

// Accessors for per-page attributes of fixed-size-object pages.
#define page_obj_align(i) fixedobj_pages[i].attr.parts.obj_align
#define page_obj_size(i) fixedobj_pages[i].attr.parts.obj_size
#define set_page_full(i) fixedobj_pages[i].free_index = IMMOBILE_CARD_BYTES
#define page_full_p(i) (fixedobj_pages[i].free_index >= (int)IMMOBILE_CARD_BYTES)
#define fixedobj_page_wp(i) (fixedobj_pages[i].attr.parts.flags & WRITE_PROTECT)

/// Variable-length pages:

// Array of inverted write-protect flags, 1 bit per page.
unsigned int* varyobj_page_touched_bits;
static int n_bitmap_elts; // length of array measured in 'int's

// Array of offsets backwards in double-lispwords from the page end
// to the lowest-addressed object touching the page. This offset can
// point to a hole, but we prefer that it not. If the offset is zero,
// the page has no object other than possibly a hole resulting
// from a freed object.
unsigned short* varyobj_page_scan_start_offset;

// Array of page generation masks
unsigned char* varyobj_page_header_gens;
// Holes to be stuffed back into the managed free list.
lispobj varyobj_holes;

#define VARYOBJ_PAGE_GENS(x) varyobj_page_header_gens[x-FIRST_VARYOBJ_PAGE]
// NOTE(review): the word index below is relative to FIRST_VARYOBJ_PAGE while
// the bit position uses the absolute page number; this presumes
// FIRST_VARYOBJ_PAGE is a multiple of 32 -- confirm.
#define varyobj_page_touched(x) \
  ((varyobj_page_touched_bits[(x-FIRST_VARYOBJ_PAGE)/32] >> (x&31)) & 1)

#ifdef VERIFY_PAGE_GENS
void check_fixedobj_page(low_page_index_t);
void check_varyobj_pages();
#endif
148 
149 // Object header:  generation byte --|    |-- widetag
150 //                                   v    v
151 //                       0xzzzzzzzz GGzzzzww
152 //         arbitrary data  --------   ---- length in words
153 //
154 // There is a hard constraint on NUM_GENERATIONS, which is currently 8.
155 // (0..5=normal, 6=pseudostatic, 7=scratch)
156 // It could be as high as 16 for 32-bit words (wherein scratch=gen15)
157 // or 32 for 64-bits words (wherein scratch=gen31).
158 // In each case, the VISITED flag bit weight would need to be modified.
159 // Shifting a 1 bit left by the contents of the generation byte
160 // must not overflow a register.
161 
#ifdef LISP_FEATURE_LITTLE_ENDIAN
// Store 'gen' into the generation byte of an object header (byte 3 of the
// header word on little-endian; see the layout diagram above).
// Note this writes the entire byte, so it also clears any VISITED flag.
static inline void assign_generation(lispobj* obj, generation_index_t gen)
{
    ((generation_index_t*)obj)[3] = gen;
}
// Turn a grey node black.
static inline void set_visited(lispobj* obj)
{
#ifdef DEBUG
    // Only newspace (grey) objects should ever be blackened.
    gc_assert(__immobile_obj_gen_bits(obj) == new_space);
#endif
    ((generation_index_t*)obj)[3] |= IMMOBILE_OBJ_VISITED_FLAG;
}
#else
#error "Need to define assign_generation() for big-endian"
#endif
178 
179 static inline void *
low_page_address(low_page_index_t page_num)180 low_page_address(low_page_index_t page_num)
181 {
182     return ((void*)IMMOBILE_SPACE_START + (page_num * IMMOBILE_CARD_BYTES));
183 }
184 
185 //// Variable-length utilities
186 
/* Calculate the address where the first object touching this page starts. */
static inline lispobj*
varyobj_scan_start(low_page_index_t page_index)
{
    // The per-page offset is stored in doublewords (2 lispwords each),
    // measured backwards from the page end (= start of the next page).
    return (lispobj*)((char*)low_page_address(page_index+1)
                      - varyobj_page_scan_start_offset[page_index - FIRST_VARYOBJ_PAGE]
                        * (2 * N_WORD_BYTES));
}
195 
/* Return the generation mask for objects headers on 'page_index'
   including at most one object that starts before the page but ends on
   or after it.
   If the scan start is within the page, i.e. less than DOUBLEWORDS_PER_PAGE
   (note that the scan start is measured relative to the page end) then
   we don't need to OR in the generation byte from an extra object,
   as all headers on the page are accounted for in the page generation mask.
   Also an empty page (where scan start is zero) avoids looking
   at the next page's first object by accident via the same test. */
unsigned char varyobj_page_gens_augmented(low_page_index_t page_index)
{
  // When the scan start lies on an earlier page, OR in the generation bit
  // of that overhanging object; otherwise the page mask alone suffices.
  return (varyobj_page_scan_start_offset[page_index - FIRST_VARYOBJ_PAGE] <= DOUBLEWORDS_PER_PAGE
          ? 0 : (1<<__immobile_obj_generation(varyobj_scan_start(page_index))))
         | VARYOBJ_PAGE_GENS(page_index);
}
211 
212 //// Fixed-length object allocator
213 
214 /* Return the index of an immobile page that is probably not totally full,
215    starting with 'hint_page' and wrapping around.
216    'attributes' determine an eligible page.
217    *IMMOBILE-SPACE-FREE-POINTER* is updated to point beyond the found page
218    if it previously did not. */
219 
static int get_freeish_page(int hint_page, int attributes)
{
  int page = hint_page;
  lispobj new_free_pointer, old_free_pointer, actual_old;
  struct symbol * free_pointer_sym;
  int page_attr_packed;
  unsigned char best_genmask = 0xff;
  int best_page = -1;

  // Speed of this could be improved by keeping a linked list of pages
  // with any space available, headed by a field in the page struct.
  // This is totally lock-free / wait-free though, so it's really not
  // too shabby, because it never has to deal with a page-table mutex.
  do {
      page_attr_packed = fixedobj_pages[page].attr.packed;
      if (page_attr_packed == 0)
          // A zero attribute word marks a never-claimed page: try to stamp
          // it atomically with the requested attributes. If we win the CAS,
          // the page is ours.
          if ((page_attr_packed =
               __sync_val_compare_and_swap(&fixedobj_pages[page].attr.packed,
                                           0, attributes)) == 0) {
              // Atomically assign MAX(old_free_pointer, new_free_pointer)
              // into the free pointer.
              free_pointer_sym = SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER);
              new_free_pointer = (lispobj)low_page_address(page+1);
              old_free_pointer = free_pointer_sym->value;
              // CAS loop: retry until the free pointer is at least as high
              // as the end of the page just claimed (another thread may be
              // racing to raise it too).
              while (new_free_pointer > old_free_pointer) {
                  actual_old =
                    __sync_val_compare_and_swap(&free_pointer_sym->value,
                                                old_free_pointer,
                                                new_free_pointer);
                  if (actual_old == old_free_pointer)
                      break;
                  old_free_pointer = actual_old;
              }
              return page;
          }
      if (ATTRIBUTES_MATCH_P(page_attr_packed, attributes)
          && !page_full_p(page)) {
          // A gens mask of 0 or 1 (empty, or gen0-only) is taken
          // immediately; otherwise remember the page with the numerically
          // smallest mask seen so far as a fallback.
          if (fixedobj_pages[page].gens <= 1) { // instant win
            return page;
          } else if (fixedobj_pages[page].gens < best_genmask) {
            best_genmask = fixedobj_pages[page].gens;
            best_page = page;
          }
      }
      // Wrap around within the fixed-size-object subspace.
      if (++page >= FIRST_VARYOBJ_PAGE) page = 0;
  } while (page != hint_page);
  if (best_page >= 0)
      return best_page;
  lose("No more immobile pages available");
}
270 
271 // Unused, but possibly will be for some kind of collision-avoidance scheme
272 // on claiming of new free pages.
273 long immobile_alloc_collisions;
274 
275 /* Beginning at page index *hint, attempt to find space
276    for an object on a page with page_attributes. Write its header word
277    and return a C (native) pointer. The start page MUST have the proper
   characteristics, but might be totally full.
279 
280    Precondition: Lisp has established a pseudo-atomic section. */
281 
282 /* There is a slightly different algorithm that would probably be faster
283    than what is currently implemented:
284    - hint should be the address of a word that you try to claim
285      as an object header; it moves from high-to-low instead of low-to-high.
286      It's easier to compute the page base than the last valid object start
287      if there are some wasted words at the end due to page size not being
288      a perfect multiple of object size.
   - you do a CAS into that word, and either succeed or fail
290    - if you succeed, subtract the object spacing and compare
291      to the page's base address, which can be computed by
292      masking. if the next address is above or equal to the page start,
293      store it in the hint, otherwise mark the page full */
294 
/* Allocate a fixed-size immobile object whose header word is 'header',
   on a page having 'page_attributes', starting the search at page *hint
   (see the comment block above for the contract and a faster alternative).
   Returns a native pointer to the claimed object.
   Precondition: Lisp has established a pseudo-atomic section. */
lispobj alloc_immobile_obj(int page_attributes, lispobj header, int* hint)
{
  int page;
  lispobj word;
  char * page_data, * obj_ptr, * next_obj_ptr, * limit, * next_free;
  int spacing_in_bytes = OBJ_SPACING(page_attributes) << WORD_SHIFT;

  page = *hint;
#ifdef DEBUG
  gc_assert(low_page_address(page) < (void*)SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER)->value);
#endif
  do {
      page_data = low_page_address(page);
      obj_ptr = page_data + fixedobj_pages[page].free_index;
      limit = page_data + IMMOBILE_CARD_BYTES - spacing_in_bytes;
      while (obj_ptr <= limit) {
          word = *(lispobj*)obj_ptr;
          next_obj_ptr = obj_ptr + spacing_in_bytes;
          if (fixnump(word) // a fixnum marks free space
              && __sync_bool_compare_and_swap((lispobj*)obj_ptr,
                                              word, header)) {
              // The value formerly in the header word was the offset to
              // the next hole. Use it to update the freelist pointer.
              // Just slam it in.
              fixedobj_pages[page].free_index = next_obj_ptr + word - page_data;
              return (lispobj)obj_ptr;
          }
          // If some other thread updated the free_index
          // to a larger value, use that. (See example below)
          next_free = page_data + fixedobj_pages[page].free_index;
          obj_ptr = next_free > next_obj_ptr ? next_free : next_obj_ptr;
      }
      // This page is exhausted: mark it full, find another page with the
      // same attributes, and publish it through 'hint' for future callers.
      set_page_full(page);
      page = get_freeish_page(page+1 >= FIRST_VARYOBJ_PAGE ? 0 : page+1,
                              page_attributes);
      *hint = page;
  } while (1);
}
333 
334 /*
Example: Consider the freelist initially pointing to word index 6
336 Threads A, and B, and C each want to claim index 6.
337 - Thread A wins and then is switched out immediately after the CAS.
338 - Thread B fails to claim cell 6, claims cell 12 instead.
339 - Thread C fails to claim a cell and is switched out immediately
340   after the CAS.
341 - Thread B writes the index of the next hole, cell 18 into the
342   page's freelist cell.
343 - Thread A wakes up and writes 12 into the freelist cell.
344 - Thread C wakes up sees 12 for next_offset. 12 is greater than 6,
345   so it sets its next probe location to 12.
346   It fails the fixnump(header) test.
347 - Thread C sees that next_offset is still 12,
348   so it skips by the page's object spacing instead, and will continue
349   to do so until hitting the end of the page.
350 */
351 
352 //// The collector
353 
// Prepare immobile space for a GC cycle: compute the high-water marks,
// place the re-scan queue just past the used varyobj pages, unprotect all
// in-use pages, and flag pages with fresh allocations as holding
// generation-0 (nursery) objects.
void update_immobile_nursery_bits()
{
  low_page_index_t page;
  lispobj fixedobj_free_ptr = SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER)->value;
  lispobj varyobj_free_ptr = SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value;

  // Find the high water marks for this GC scavenge phase
  // [avoid passing exactly IMMOBILE_SPACE_END, which has no page index]
  max_used_fixedobj_page = find_immobile_page_index((void*)(fixedobj_free_ptr-1));
  max_used_varyobj_page = find_immobile_page_index((void*)(varyobj_free_ptr-1));

  // The re-scan queue occupies the page just past the last used varyobj page;
  // assert that at least QCAPACITY entries fit before the space end.
  immobile_scav_queue = (unsigned int*)low_page_address(max_used_varyobj_page+1);
  gc_assert((IMMOBILE_SPACE_END - (uword_t)immobile_scav_queue) / sizeof(int)
            >= QCAPACITY);

  // Unprotect the in-use ranges. Any page could be written during scavenge
  os_protect((os_vm_address_t)IMMOBILE_SPACE_START,
             fixedobj_free_ptr - IMMOBILE_SPACE_START,
             OS_VM_PROT_ALL);

  // varyobj_free_ptr is typically not page-aligned - only by random chance
  // might it be. Additionally we need a page beyond that for the re-scan queue.
  os_vm_address_t limit = (char*)immobile_scav_queue + IMMOBILE_CARD_BYTES;
  os_protect((os_vm_address_t)(IMMOBILE_VARYOBJ_SUBSPACE_START),
             limit - (os_vm_address_t)IMMOBILE_VARYOBJ_SUBSPACE_START,
             OS_VM_PROT_ALL);

  for (page=0; page <= max_used_fixedobj_page ; ++page) {
      // any page whose free index changed contains nursery objects
      if (fixedobj_pages[page].free_index >> WORD_SHIFT !=
          fixedobj_pages[page].prior_gc_free_word_index)
          fixedobj_pages[page].gens |= 1;
#ifdef VERIFY_PAGE_GENS
      check_fixedobj_page(page);
#endif
  }
#ifdef VERIFY_PAGE_GENS
  check_varyobj_pages();
#endif
}
394 
// Pick the larger string widetag available in this build: character
// strings when the widetag is defined, otherwise base strings.
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
#define MAXIMUM_STRING_WIDETAG SIMPLE_CHARACTER_STRING_WIDETAG
#else
#define MAXIMUM_STRING_WIDETAG SIMPLE_BASE_STRING_WIDETAG
#endif
400 
unboxed_array_p(int widetag)401 static inline boolean unboxed_array_p(int widetag)
402 {
403     // This is not an exhaustive test for unboxed objects,
404     // but it's enough to avoid some unnecessary scavenging.
405     return (widetag >= SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG
406             && widetag <= MAXIMUM_STRING_WIDETAG
407             && widetag != SIMPLE_VECTOR_WIDETAG);
408 }
409 
410 /* Turn a white object grey. Also enqueue the object for re-scan if required */
411 void
promote_immobile_obj(lispobj * ptr,int rescan)412 promote_immobile_obj(lispobj *ptr, int rescan) // a native pointer
413 {
414     if (widetag_of(*ptr) == SIMPLE_FUN_HEADER_WIDETAG)
415         ptr = (lispobj*)code_obj_from_simple_fun((struct simple_fun*)ptr);
416     gc_assert(__immobile_obj_gen_bits(ptr) == from_space);
417     int pointerish = !unboxed_array_p(widetag_of(*ptr));
418     assign_generation(ptr, (pointerish ? 0 : IMMOBILE_OBJ_VISITED_FLAG) | new_space);
419     low_page_index_t page_index = find_immobile_page_index(ptr);
420 
421     if (page_index >= FIRST_VARYOBJ_PAGE) {
422         VARYOBJ_PAGE_GENS(page_index) |= 1<<new_space;
423     } else {
424         fixedobj_pages[page_index].gens |= 1<<new_space;
425     }
426     // If called from preserve_pointer(), then we haven't scanned immobile
427     // roots yet, so we only need ensure that this object's page's WP bit
428     // is cleared so that the page is not skipped during root scan.
429     if (!rescan) {
430         if (pointerish) {
431             if (page_index >= FIRST_VARYOBJ_PAGE)
432                 varyobj_page_touched_bits[(page_index-FIRST_VARYOBJ_PAGE)/32]
433                     |= 1 << (page_index & 31);
434             else
435                 SET_WP_FLAG(page_index, WRITE_PROTECT_CLEARED);
436         }
437         return; // No need to enqueue.
438     }
439 
440     // Do nothing if either we don't need to look for pointers in this object,
441     // or the work queue has already overflowed, causing a full scan.
442     if (!pointerish || immobile_scav_queue_count > QCAPACITY) return;
443 
444     // count is either less than or equal to QCAPACITY.
445     // If equal, just bump the count to signify overflow.
446     if (immobile_scav_queue_count < QCAPACITY) {
447         immobile_scav_queue[immobile_scav_queue_head] =
448             (uword_t)ptr & 0xFFFFFFFF; // Drop the high bits
449         immobile_scav_queue_head = (immobile_scav_queue_head + 1) & (QCAPACITY - 1);
450     }
451     ++immobile_scav_queue_count;
452 }
453 
/* If 'addr' points to an immobile object, then make the object
   live by promotion. But if the object is not in the generation
   being collected, do nothing */
void immobile_space_preserve_pointer(void* addr)
{
    low_page_index_t page_index = find_immobile_page_index(addr);
    if (page_index < 0)
        return; // not in immobile space at all

    lispobj* header_addr;
    if (page_index >= FIRST_VARYOBJ_PAGE) {
        // Restrict addr to lie below IMMOBILE_SPACE_FREE_POINTER.
        // This way, if the gens byte is nonzero but there is
        // a final array acting as filler on the remainder of the
        // final page, we won't accidentally find that.
        lispobj* start;
        if ((lispobj)addr >= SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value
            || !(varyobj_page_gens_augmented(page_index) & (1<<from_space))
            || (start = varyobj_scan_start(page_index)) > (lispobj*)addr)
            return;
        // Locate the object enclosing 'addr' by scanning forward from the
        // first object that touches this page.
        header_addr = gc_search_space(start,
                                      native_pointer((lispobj)addr)+2 - start,
                                      (lispobj*)addr);
        // Reject filler objects and improperly tagged interior pointers
        // (code objects are exempt from the tagging check).
        if (!header_addr ||
            immobile_filler_p(header_addr) ||
            (widetag_of(*header_addr) != CODE_HEADER_WIDETAG &&
             !properly_tagged_descriptor_p((lispobj)addr, header_addr)))
            return;
    } else if (fixedobj_pages[page_index].gens & (1<<from_space)) {
        // Fixed-size pages: the object index follows directly from the
        // page-relative offset and the page's object spacing.
        int obj_spacing = (page_obj_align(page_index) << WORD_SHIFT);
        int obj_index = ((uword_t)addr & (IMMOBILE_CARD_BYTES-1)) / obj_spacing;
        dprintf((logfile,"Pointer %p is to immobile page %d, object %d\n",
                 addr, page_index, obj_index));
        char* page_start_addr = (char*)((uword_t)addr & ~(IMMOBILE_CARD_BYTES-1));
        header_addr = (lispobj*)(page_start_addr + obj_index * obj_spacing);
        // Reject free cells (fixnum where a header would be), pointers past
        // the object's end, and improperly tagged interior pointers.
        if (fixnump(*header_addr) ||
            (lispobj*)addr >= header_addr + page_obj_size(page_index) ||
            !properly_tagged_descriptor_p((lispobj)addr, header_addr))
           return;
    } else {
        return;
    }
    if (__immobile_obj_gen_bits(header_addr) == from_space) {
        dprintf((logfile,"immobile obj @ %p (<- %p) is conservatively live\n",
                 header_addr, addr));
        promote_immobile_obj(header_addr, 0);
    }
}
502 
// Loop over the newly-live objects, scavenging them for pointers.
// As with the ordinary gencgc algorithm, this uses almost no stack.
static void full_scavenge_immobile_newspace()
{
    page_index_t page;
    unsigned char bit = 1<<new_space;

    // Fixed-size object pages.

    for (page = 0; page <= max_used_fixedobj_page; ++page) {
        if (!(fixedobj_pages[page].gens & bit)) continue;
        // Skip amount within the loop is in bytes.
        int obj_spacing = page_obj_align(page) << WORD_SHIFT;
        int n_words     = page_obj_size(page);
        lispobj* obj    = low_page_address(page);
        lispobj* limit  = (lispobj*)((char*)obj +
                                     IMMOBILE_CARD_BYTES - obj_spacing);
        for ( ; obj <= limit ; obj = (lispobj*)((char*)obj + obj_spacing) ) {
            // Skip free cells (fixnum headers). The gen-bits comparison
            // includes the VISITED flag, so already-blackened objects
            // compare unequal to new_space and are not re-scavenged.
            if (!fixnump(*obj) && __immobile_obj_gen_bits(obj) == new_space) {
                set_visited(obj);
                scavenge(obj, n_words);
            }
        }
    }

    // Variable-size object pages

    page = FIRST_VARYOBJ_PAGE - 1; // Subtract 1 because of pre-increment
    while (1) {
        // Find the next page with anything in newspace.
        do {
            if (++page > max_used_varyobj_page) return;
        } while ((VARYOBJ_PAGE_GENS(page) & bit) == 0);
        lispobj* obj = varyobj_scan_start(page);
        do {
            lispobj* limit = (lispobj*)low_page_address(page) + WORDS_PER_PAGE;
            int widetag, n_words;
            for ( ; obj < limit ; obj += n_words ) {
                n_words = sizetab[widetag = widetag_of(*obj)](obj);
                if (__immobile_obj_gen_bits(obj) == new_space) {
                    set_visited(obj);
                    scavenge(obj, n_words);
                }
            }
            page = find_immobile_page_index(obj);
            // Bail out if exact absolute end of immobile space was reached.
            if (page < 0) return;
            // If 'page' should be scanned, then pick up where we left off,
            // without recomputing 'obj' but setting a higher 'limit'.
        } while (VARYOBJ_PAGE_GENS(page) & bit);
    }
}
555 
556 /// Repeatedly scavenge immobile newspace work queue until we find no more
557 /// reachable objects within. (They might be in dynamic space though).
558 /// If queue overflow already happened, then a worst-case full scan is needed.
559 /// If it didn't, we try to drain the queue, hoping that overflow does
560 /// not happen while doing so.
561 /// The approach taken is more subtle than just dequeuing each item,
562 /// scavenging, and letting the outer 'while' loop take over.
563 /// That would be ok, but could cause more full scans than necessary.
564 /// Instead, since each entry in the queue is useful information
565 /// in the non-overflow condition, perform all the work indicated thereby,
566 /// rather than considering the queue discardable as soon as overflow happens.
567 /// Essentially we just have to capture the valid span of enqueued items,
568 /// because the queue state is inconsistent when 'count' exceeds 'capacity'.
// Drain the work queue of newly-grey objects (see the long comment above),
// falling back to a full newspace scan whenever the queue has overflowed.
void scavenge_immobile_newspace()
{
  while (immobile_scav_queue_count) {
      if (immobile_scav_queue_count > QCAPACITY) {
          // Overflow: the enqueued span is inconsistent, so discard it
          // and do the worst-case full scan.
          immobile_scav_queue_count = 0;
          full_scavenge_immobile_newspace();
      } else {
          // Capture the valid circular span [from, to) of enqueued items.
          int queue_index_from = (immobile_scav_queue_head - immobile_scav_queue_count)
                               & (QCAPACITY - 1);
          int queue_index_to   = immobile_scav_queue_head;
          int i = queue_index_from;
          // The termination condition can't be expressed as an inequality,
          // since the indices might be reversed due to wraparound.
          // To express as equality entails forcing at least one iteration
          // since the ending index might be the starting index.
          do {
              // NOTE(review): entries hold only the low 32 bits of the
              // object address (see promote_immobile_obj); this presumes
              // immobile space lies below 4GB -- confirm.
              lispobj* obj = (lispobj*)(uword_t)immobile_scav_queue[i];
              i = (1 + i) & (QCAPACITY-1);
              // Only decrement the count if overflow did not happen.
              // The first iteration of this loop will decrement for sure,
              // but subsequent iterations might not.
              if (immobile_scav_queue_count <= QCAPACITY)
                  --immobile_scav_queue_count;
              if (!(__immobile_obj_gen_bits(obj) & IMMOBILE_OBJ_VISITED_FLAG)) {
                set_visited(obj);
                scavenge(obj, sizetab[widetag_of(*obj)](obj));
              }
          } while (i != queue_index_to);
      }
  }
}
600 
601 // Return a page >= page_index having potential old->young pointers,
602 // or -1 if there isn't one.
next_varyobj_root_page(unsigned int page_index,unsigned int end_bitmap_index,unsigned char genmask)603 static int next_varyobj_root_page(unsigned int page_index,
604                                   unsigned int end_bitmap_index,
605                                   unsigned char genmask)
606 {
607     unsigned int map_index = (page_index - FIRST_VARYOBJ_PAGE) / 32;
608     if (map_index >= end_bitmap_index) return -1;
609     int bit_index = page_index & 31;
610     // Look only at bits of equal or greater weight than bit_index.
611     unsigned int word = (0xFFFFFFFFU << bit_index) & varyobj_page_touched_bits[map_index];
612     while (1) {
613         if (word) {
614             bit_index = ffs(word) - 1;
615             page_index = FIRST_VARYOBJ_PAGE + map_index * 32 + bit_index;
616             if (varyobj_page_gens_augmented(page_index) & genmask)
617                 return page_index;
618             else {
619                 word ^= (1<<bit_index);
620                 continue;
621             }
622         }
623         if (++map_index >= end_bitmap_index) return -1;
624         word = varyobj_page_touched_bits[map_index];
625     }
626 }
627 
// Scavenge, as GC roots, all immobile-space objects whose generation lies
// in [min_gen, max_gen], skipping pages that are write-protected
// (untouched) since the previous GC. Finishes by draining the newspace
// re-scan queue.
void
scavenge_immobile_roots(generation_index_t min_gen, generation_index_t max_gen)
{
    // example: scavenging gens 2..6, the mask of root gens is #b1111100
    int genmask = ((1 << (max_gen - min_gen + 1)) - 1) << min_gen;

    low_page_index_t page;
    for (page = 0; page <= max_used_fixedobj_page ; ++page) {
        // Skip write-protected pages and pages holding no root generations.
        if (fixedobj_page_wp(page) || !(fixedobj_pages[page].gens & genmask))
            continue;
        int obj_spacing = page_obj_align(page) << WORD_SHIFT;
        int n_words = page_obj_size(page);
        lispobj* obj = low_page_address(page);
        lispobj* limit = (lispobj*)((char*)obj +
                                    IMMOBILE_CARD_BYTES - obj_spacing);
        int gen;
        // Immobile space can only contain objects with a header word,
        // no conses, so any fixnum where a header could be is not a live
        // object.
        do {
            if (!fixnump(*obj) && (genmask >> (gen=__immobile_obj_gen_bits(obj)) & 1)) {
                if (gen == new_space) { set_visited(obj); }
                scavenge(obj, n_words);
            }
        } while ((obj = (lispobj*)((char*)obj + obj_spacing)) <= limit);
    }

    // Variable-length object pages
    unsigned n_varyobj_pages = 1+max_used_varyobj_page-FIRST_VARYOBJ_PAGE;
    unsigned end_bitmap_index = (n_varyobj_pages+31)/32;
    page = next_varyobj_root_page(FIRST_VARYOBJ_PAGE, end_bitmap_index, genmask);
    while (page >= 0) {
        lispobj* obj = varyobj_scan_start(page);
        do {
            lispobj* limit = (lispobj*)low_page_address(page) + WORDS_PER_PAGE;
            int widetag, n_words, gen;
            for ( ; obj < limit ; obj += n_words ) {
                n_words = sizetab[widetag = widetag_of(*obj)](obj);
                if (genmask >> (gen=__immobile_obj_gen_bits(obj)) & 1) {
                    if (gen == new_space) { set_visited(obj); }
                    scavenge(obj, n_words);
                }
            }
            // The last object scanned may spill onto following pages;
            // continue straight through any that are also touched roots.
            page = find_immobile_page_index(obj);
        } while (page > 0
                 && (VARYOBJ_PAGE_GENS(page) & genmask)
                 && varyobj_page_touched(page));
        page = next_varyobj_root_page(1+page, end_bitmap_index, genmask);
    }
    scavenge_immobile_newspace();
}
679 
#include "genesis/layout.h"
// Size of a LAYOUT object in Lisp words.
#define LAYOUT_SIZE (sizeof (struct layout)/N_WORD_BYTES)

// Per-object-kind page hints fed to alloc_immobile_obj.
// As long as Lisp doesn't have any native allocators (vops and whatnot)
// it doesn't need to access these values.
int layout_page_hint, symbol_page_hint, fdefn_page_hint;
686 
// For the three different page characteristics that we need,
// claim a page that works for those characteristics.
void set_immobile_space_hints()
{
  // The allocator doesn't check whether each 'hint' points to an
  // expected kind of page, so we have to ensure up front that
  // allocations start on different pages. i.e. You can point to
  // a totally full page, but you can't point to a wrong page.
  // It doesn't work to just assign these to consecutive integers
  // without also updating the page attributes.

  // Object sizes must be multiples of 2 because the n_words value we pass
  // to scavenge() is gotten from the page attributes, and scavenge asserts
  // that the ending address is aligned to a doubleword boundary as expected.

  // LAYOUTs are 256-byte-aligned so that the low byte contains no information.
  // This makes it possible to recover a layout pointer from an instance header
  // by simply changing the low byte to instance-pointer-lowtag.
  // As a test of objects using larger-than-required alignment,
  // the 64-bit implementation uses 256-byte alignment for layouts,
  // even though the header can store all bits of the layout pointer.
  // The 32-bit implementation would also need somewhere different to store
  // the generation byte of each layout, which is a minor annoyance.

  // Claim one page per object kind; the CAS inside get_freeish_page
  // stamps the page with the requested attributes.
  layout_page_hint = get_freeish_page(0, MAKE_ATTR(256/N_WORD_BYTES, // spacing
                                                   CEILING(LAYOUT_SIZE,2),
                                                   0));
  symbol_page_hint = get_freeish_page(0, MAKE_ATTR(CEILING(SYMBOL_SIZE,2),
                                                   CEILING(SYMBOL_SIZE,2),
                                                   0));
  fdefn_page_hint = get_freeish_page(0, MAKE_ATTR(CEILING(FDEFN_SIZE,2),
                                                  CEILING(FDEFN_SIZE,2),
                                                  0));
}
720 
/* Write-protect every immobile page that currently qualifies, batching
   adjacent pages into single os_protect calls.  Also resets the
   deferred-scavenge queue and re-establishes the allocation page hints.
   (Called from immobile_space_coreparse at image startup.) */
void write_protect_immobile_space()
{
    immobile_scav_queue = NULL;
    immobile_scav_queue_head = 0;

    set_immobile_space_hints();

    // Now find contiguous ranges of pages that are protectable,
    // minimizing the number of system calls as much as possible.
    int i, start = -1, end = -1; // inclusive bounds on page indices
    for (i = max_used_fixedobj_page ; i >= 0 ; --i) {
        // Scanning downward: 'end' latches the high page of a run,
        // 'start' tracks its (shrinking) low page.
        if (fixedobj_page_wp(i)) {
            if (end < 0) end = i;
            start = i;
        }
        // Flush the accumulated run when it is broken by a
        // non-protectable page, or when the scan reaches page 0.
        if (end >= 0 && (!fixedobj_page_wp(i) || i == 0)) {
            os_protect(low_page_address(start),
                       IMMOBILE_CARD_BYTES * (1 + end - start),
                       OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
            start = end = -1;
        }
    }
// A varyobj page is protectable exactly when it has not been touched.
#define varyobj_page_wp(x) !varyobj_page_touched(x)
    for (i = max_used_varyobj_page ; i >= FIRST_VARYOBJ_PAGE ; --i) {
        if (varyobj_page_wp(i)) {
            if (end < 0) end = i;
            start = i;
        }
        if (end >= 0 && (!varyobj_page_wp(i) || i == FIRST_VARYOBJ_PAGE)) {
            os_protect(low_page_address(start),
                       IMMOBILE_CARD_BYTES * (1 + end - start),
                       OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
            start = end = -1;
        }
    }
#undef varyobj_page_wp
}
758 
759 // Scan range between start and end (exclusive) for old-to-young pointers.
760 // 'keep_gen' is the value of the generation byte of objects that were
761 // candidates to become garbage, but remain live after this gc.
762 // It will necessarily have the VISITED flag on.
763 // 'new_gen' is the generation number that those objects will have
764 // after collection, which is either the same generation or one higher,
765 // depending on the 'raise' flag for this GC cycle.
// Returns 1 if any pointer in [obj, end) refers to an object in a
// generation strictly younger than 'gen', else 0.
// NOTE(review): the do/while examines *obj before testing the bound,
// so it assumes a nonempty range — callers appear to guarantee
// end > obj (varyobj_points_to_younger_p checks explicitly); confirm
// for the fixedobj path.
static int
range_points_to_younger_p(lispobj* obj, lispobj* end,
                          int gen, int keep_gen, int new_gen)
{
#ifdef DEBUG
  lispobj* __attribute__((unused)) saved_obj = obj, __attribute__((unused)) header = *obj;
#endif
    do {
        lispobj thing = *obj;
        if (is_lisp_pointer(thing)) {
            // 255 = "generation unknown / not in a space we track",
            // which can never compare younger than 'gen'.
            int to_page = find_page_index((void*)thing),
                to_gen = 255;
            if (to_page >= 0) { // points to ordinary dynamic space
                to_gen = page_table[to_page].gen;
                if (to_gen == PSEUDO_STATIC_GENERATION+1) // scratch gen
                    to_gen = new_gen; // is actually this
            } else if (immobile_space_p(thing)) {
                // Processing the code-entry-points slot of a code component
                // requires the general variant of immobile_obj_gen_bits
                // because the pointed-to object is a simple-fun.
                to_gen = immobile_obj_gen_bits(native_pointer(thing));
                if (to_gen == keep_gen) // keep gen
                    to_gen = new_gen; // is actually this
            }
            if (to_gen < gen) {
                return 1; // yes, points to younger
            }
        }
    } while (++obj < end);
    return 0; // no, does not point to younger
}
797 
798 // Scan a fixed-size object for old-to-young pointers.
799 // Since fixed-size objects are boxed and on known boundaries,
800 // we never start in the middle of random bytes, so the answer is exact.
801 static inline boolean
fixedobj_points_to_younger_p(lispobj * obj,int n_words,int gen,int keep_gen,int new_gen)802 fixedobj_points_to_younger_p(lispobj* obj, int n_words,
803                              int gen, int keep_gen, int new_gen)
804 {
805     return range_points_to_younger_p(obj+1, obj+n_words,
806                                      gen, keep_gen, new_gen);
807 }
808 
// Scan one variable-sized object for old-to-young pointers, but only
// those of its words lying within [page_begin, page_end), so a caller
// can act as if a single page were scanned even when the object spans
// several pages.
static boolean
varyobj_points_to_younger_p(lispobj* obj, int gen, int keep_gen, int new_gen,
                            os_vm_address_t page_begin,
                            os_vm_address_t page_end) // upper (exclusive) bound
{
    lispobj *begin, *end, word = *obj;
    unsigned char widetag = widetag_of(word);
    if (widetag == CODE_HEADER_WIDETAG) { // usual case. Like scav_code_header()
        // First examine each simple-fun's scavengeable words,
        // clipped to the page bounds.
        for_each_simple_fun(i, function_ptr, (struct code*)obj, 0, {
            begin = SIMPLE_FUN_SCAV_START(function_ptr);
            end   = begin + SIMPLE_FUN_SCAV_NWORDS(function_ptr);
            if (page_begin > (os_vm_address_t)begin) begin = (lispobj*)page_begin;
            if (page_end   < (os_vm_address_t)end)   end   = (lispobj*)page_end;
            if (end > begin
                && range_points_to_younger_p(begin, end, gen, keep_gen, new_gen))
                return 1;
        })
        // Then fall through to the code component's boxed constants.
        begin = obj + 1; // skip the header
        end = obj + code_header_words(word); // exclusive bound on boxed slots
    } else if (widetag == SIMPLE_VECTOR_WIDETAG) {
        sword_t length = fixnum_value(((struct vector *)obj)->length);
        begin = obj + 2; // skip the header and length
        end = obj + CEILING(length + 2, 2);
    } else if (widetag >= SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG &&
               widetag <= MAXIMUM_STRING_WIDETAG) {
        return 0; // unboxed array: contains no pointers
    } else {
        lose("Unexpected widetag @ %p", obj);
    }
    // Fallthrough: scan words from begin to end, clipped to the page bounds.
    if (page_begin > (os_vm_address_t)begin) begin = (lispobj*)page_begin;
    if (page_end   < (os_vm_address_t)end)   end   = (lispobj*)page_end;
    if (end > begin && range_points_to_younger_p(begin, end, gen, keep_gen, new_gen))
        return 1;
    return 0;
}
845 
846 /// The next two functions are analogous to 'update_page_write_prot()'
847 /// but they differ in that they are "precise" - random code bytes that look
848 /// like pointers are not accidentally treated as pointers.
849 
// If 'page' does not contain any object that points to an object
// younger than itself, then return true.
852 // This is called on pages that do not themselves contain objects of
853 // the generation being collected, but might contain pointers
854 // to younger generations, which we detect by a cleared WP status bit.
855 // The bit is cleared on any write, though, even of a non-pointer,
856 // so this unfortunately has to be tested much more often than we'd like.
can_wp_fixedobj_page(page_index_t page,int keep_gen,int new_gen)857 static inline boolean can_wp_fixedobj_page(page_index_t page, int keep_gen, int new_gen)
858 {
859     int obj_spacing = page_obj_align(page) << WORD_SHIFT;
860     int obj_size_words = page_obj_size(page);
861     lispobj* obj = low_page_address(page);
862     lispobj* limit = (lispobj*)((char*)obj + IMMOBILE_CARD_BYTES - obj_spacing);
863     for ( ; obj <= limit ; obj = (lispobj*)((char*)obj + obj_spacing) )
864         if (!fixnump(*obj) && // an object header
865             fixedobj_points_to_younger_p(obj, obj_size_words,
866                                          __immobile_obj_generation(obj),
867                                          keep_gen, new_gen))
868             return 0;
869     return 1;
870 }
871 
872 // To scan _only_ 'page' is impossible in general, but we can act like only
873 // one page was scanned by backing up to the first object whose end is on
874 // or after it, and then restricting points_to_younger within the boundaries.
875 // Doing it this way is probably much better than conservatively assuming
876 // that any word satisfying is_lisp_pointer() is a pointer.
can_wp_varyobj_page(page_index_t page,int keep_gen,int new_gen)877 static inline boolean can_wp_varyobj_page(page_index_t page, int keep_gen, int new_gen)
878 {
879     lispobj *begin = (lispobj*)low_page_address(page);
880     lispobj *end   = begin + WORDS_PER_PAGE;
881     lispobj *obj   = varyobj_scan_start(page);
882     for ( ; obj < end ; obj += sizetab[widetag_of(*obj)](obj) ) {
883         gc_assert(other_immediate_lowtag_p(*obj));
884         if (!immobile_filler_p(obj) &&
885             varyobj_points_to_younger_p(obj,
886                                         __immobile_obj_generation(obj),
887                                         keep_gen, new_gen,
888                                         (os_vm_address_t)begin,
889                                         (os_vm_address_t)end))
890             return 0;
891     }
892     return 1;
893 }
894 
895 /*
896   Sweep immobile space by zeroing the memory of trashed objects
897   and linking them into the freelist.
898 
899   Possible improvements:
900   - If an entire page becomes nothing but holes, we could bzero it
901     instead of object-at-a-time clearing. But it's not known to be
902     so until after the sweep, so it would entail two passes per page,
903     one to mark holes and one to zero them.
904   - And perhaps bzero could be used on ranges of holes, because
905     in that case each hole's pointer to the next hole is zero as well.
906 */
907 
// Declare the local variables both sweep phases need.  This must be a
// macro (not a function) because the names become locals of the
// sweeping function and refer to its 'raise' parameter.
#define SETUP_GENS()                                                   \
  /* Only care about pages with something in old or new space. */      \
  int relevant_genmask = (1 << from_space) | (1 << new_space);         \
  /* Objects whose gen byte is 'keep_gen' are alive. */                \
  int keep_gen = IMMOBILE_OBJ_VISITED_FLAG | new_space;                \
  /* Objects whose gen byte is 'from_space' are trash. */              \
  int discard_gen = from_space;                                        \
  /* Moving non-garbage into either 'from_space' or 'from_space+1' */  \
  generation_index_t new_gen = from_space + (raise!=0)
917 
918 // The new value of the page generation mask is computed as follows:
919 // If 'raise' = 1 then:
920 //     Nothing resides in 'from_space', and 'from_space+1' gains new objects
921 //     if and only if any objects on the page were retained.
922 // If 'raise' = 0 then:
923 //     Nothing resides in the scratch generation, and 'from_space'
924 //     has objects if and only if any objects were retained.
// Declares 'var' in the expansion site.  'any_kept' is either 0 or -1
// (all one bits), so ANDing it with a generation bit either clears or
// preserves that bit wholesale.
#define COMPUTE_NEW_MASK(var, old) \
  int var = old & ~(1<<from_space); \
  if ( raise ) \
    var |= 1<<(from_space+1) & any_kept; \
  else \
    var = (var & ~(1<<new_space)) | (1<<from_space & any_kept)
931 
// Sweep the fixed-size-object subspace: zero dead objects and link
// them into each page's freelist, retag survivors with 'new_gen',
// and write-protect pages found to hold no old->young pointers.
static void
sweep_fixedobj_pages(int raise)
{
    char *page_base;
    lispobj *obj, *limit, *hole;
    // This will be needed for space accounting.
    // threads might fail to consume all the space on a page.
    // By storing in the page table the count of holes that really existed
    // at the start of the prior GC, and subtracting from that the number
    // that exist now, we know how much usable space was obtained (per page).
    int n_holes = 0;
    int word_idx;

    SETUP_GENS();

    low_page_index_t page;
    for (page = 0; page <= max_used_fixedobj_page; ++page) {
        // On pages that won't need manipulation of the freelist,
        // we try to do less work than for pages that need it.
        if (!(fixedobj_pages[page].gens & relevant_genmask)) {
            // Scan for old->young pointers, and WP if there are none.
            // (gens > 1 means some generation other than 0 is present.)
            if (!fixedobj_page_wp(page) && fixedobj_pages[page].gens > 1
                && can_wp_fixedobj_page(page, keep_gen, new_gen))
                SET_WP_FLAG(page, WRITE_PROTECT);
            continue;
        }
        int obj_spacing = page_obj_align(page) << WORD_SHIFT;
        int obj_size_words = page_obj_size(page);
        page_base = low_page_address(page);
        limit = (lispobj*)(page_base + IMMOBILE_CARD_BYTES - obj_spacing);
        obj = (lispobj*)page_base;
        hole = NULL;
        int any_kept = 0; // was anything moved to the kept generation
        n_holes = 0;

        // wp_it is 1 if we should try to write-protect it now.
        // If already write-protected, skip the tests.
        int wp_it = !fixedobj_page_wp(page);
        int gen;
        for ( ; obj <= limit ; obj = (lispobj*)((char*)obj + obj_spacing) ) {
            if (fixnump(*obj)) { // was already a hole
            trash_it:
                // re-link it into the new freelist
                if (hole)
                    // store the displacement from the end of the object
                    // at prev_hole to the start of this object.
                    *hole = (lispobj)((char*)obj - ((char*)hole + obj_spacing));
                else // this is the first seen hole on the page
                    // record the byte offset to that hole
                  fixedobj_pages[page].free_index = (char*)obj - page_base;
                hole = obj;
                n_holes ++;
            } else if ((gen = __immobile_obj_gen_bits(obj)) == discard_gen) { // trash
                // Zero the payload (every word but the header),
                // then treat the cell as a hole.
                for (word_idx=obj_size_words-1 ; word_idx > 0 ; --word_idx)
                    obj[word_idx] = 0;
                goto trash_it;
            } else if (gen == keep_gen) {
                // Survivor: move it to the generation it now belongs in.
                assign_generation(obj, gen = new_gen);
#ifdef DEBUG
                gc_assert(!fixedobj_points_to_younger_p(obj, obj_size_words,
                                                        gen, keep_gen, new_gen));
#endif
                any_kept = -1;
            } else if (wp_it && fixedobj_points_to_younger_p(obj, obj_size_words,
                                                             gen, keep_gen, new_gen))
              // Old->young pointer found: the page must remain writable.
              wp_it = 0;
        }
        if ( hole ) // terminate the chain of holes
            *hole = (lispobj)((char*)obj - ((char*)hole + obj_spacing));
        fixedobj_pages[page].prior_gc_free_word_index =
          fixedobj_pages[page].free_index >> WORD_SHIFT;

        COMPUTE_NEW_MASK(mask, fixedobj_pages[page].gens);
        if ( mask ) {
            fixedobj_pages[page].gens = mask;
            if (wp_it) {
                SET_WP_FLAG(page, WRITE_PROTECT);
                dprintf((logfile, "Lowspace: set WP on page %d\n", page));
            }
        } else {
            // Nothing live remains on the page: reset all its attributes.
            dprintf((logfile,"page %d is all garbage\n", page));
            fixedobj_pages[page].attr.packed = 0;
        }
#ifdef DEBUG
        check_fixedobj_page(page);
#endif
        dprintf((logfile,"page %d: %d holes\n", page, n_holes));
    }
}
1021 
1022 void verify_immobile_page_protection(int,int);
1023 
1024 // Scan for freshly trashed objects and turn them into filler.
1025 // Lisp is responsible for consuming the free space
1026 // when it next allocates a variable-size object.
static void
sweep_varyobj_pages(int raise)
{
    SETUP_GENS();

    lispobj* free_pointer = (lispobj*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value;
    low_page_index_t page;
    for (page = FIRST_VARYOBJ_PAGE; page <= max_used_varyobj_page; ++page) {
        int genmask = VARYOBJ_PAGE_GENS(page);
        if (!(genmask & relevant_genmask)) { // Has nothing in oldspace or newspace.
            // Scan for old->young pointers, and WP if there are none.
            // NOTE(review): the bit index uses 'page & 31' while the word
            // index subtracts FIRST_VARYOBJ_PAGE; these agree only if
            // FIRST_VARYOBJ_PAGE is a multiple of 32 — same convention
            // as the varyobj_page_touched() accessor; confirm.
            if (varyobj_page_touched(page)
                && varyobj_page_gens_augmented(page) > 1
                && can_wp_varyobj_page(page, keep_gen, new_gen))
                varyobj_page_touched_bits[(page - FIRST_VARYOBJ_PAGE)/32] &= ~(1<<(page & 31));
            continue;
        }
        lispobj* page_base = (lispobj*)low_page_address(page);
        lispobj* limit = page_base + WORDS_PER_PAGE;
        if (limit > free_pointer) limit = free_pointer;
        int any_kept = 0; // was anything moved to the kept generation
        // wp_it is 1 if we should try to write-protect it now.
        // If already write-protected, skip the tests.
        int wp_it = varyobj_page_touched(page);
        lispobj* obj = varyobj_scan_start(page);
        int size, gen;

        if (obj < page_base) {
            // An object whose tail is on this page, or which spans this page,
            // would have been promoted/kept while dealing with the page with
            // the object header. Therefore we don't need to consider that object,
            // * except * that we do need to consider whether it is an old object
            // pointing to a young object.
            if (wp_it // If we wanted to try write-protecting this page,
                // and the object starting before this page is strictly older
                // than the generation that we're moving retained objects into
                && (gen = __immobile_obj_gen_bits(obj)) > new_gen
                // and it contains an old->young pointer
                && varyobj_points_to_younger_p(obj, gen, keep_gen, new_gen,
                                               (os_vm_address_t)page_base,
                                               (os_vm_address_t)limit)) {
                wp_it = 0;
            }
            // We MUST skip this object in the sweep, because in the case of
            // non-promotion (raise=0), we could see an object in from_space
            // and believe it to be dead.
            obj += sizetab[widetag_of(*obj)](obj);
            // obj can't hop over this page. If it did, there would be no
            // headers on the page, and genmask would have been zero.
            gc_assert(obj < limit);
        }
        for ( ; obj < limit ; obj += size ) {
            lispobj word = *obj;
            size = sizetab[widetag_of(word)](obj);
            if (immobile_filler_p(obj)) { // do nothing
            } else if ((gen = __immobile_obj_gen_bits(obj)) == discard_gen) {
                if (size < 4)
                    lose("immobile object @ %p too small to free", obj);
                else { // Create a filler object.
                    // The filler masquerades as a code object; its
                    // debug_info slot links the chain of holes.
                    struct code* code  = (struct code*)obj;
                    code->header       = 2<<N_WIDETAG_BITS | CODE_HEADER_WIDETAG;
                    code->code_size    = make_fixnum((size - 2) * N_WORD_BYTES);
                    code->debug_info   = varyobj_holes;
                    varyobj_holes      = (lispobj)code;
                }
            } else if (gen == keep_gen) {
                // Survivor: move it to the generation it now belongs in.
                assign_generation(obj, gen = new_gen);
#ifdef DEBUG
                gc_assert(!varyobj_points_to_younger_p(obj, gen, keep_gen, new_gen,
                                                       (os_vm_address_t)page_base,
                                                       (os_vm_address_t)limit));
#endif
                any_kept = -1;
            } else if (wp_it &&
                       varyobj_points_to_younger_p(obj, gen, keep_gen, new_gen,
                                                   (os_vm_address_t)page_base,
                                                   (os_vm_address_t)limit))
                wp_it = 0;
        }
        COMPUTE_NEW_MASK(mask, VARYOBJ_PAGE_GENS(page));
        VARYOBJ_PAGE_GENS(page) = mask;
        if ( mask && wp_it )
            varyobj_page_touched_bits[(page - FIRST_VARYOBJ_PAGE)/32] &= ~(1 << (page & 31));
    }
#ifdef DEBUG
    verify_immobile_page_protection(keep_gen, new_gen);
#endif
}
1115 
compute_immobile_space_bound()1116 static void compute_immobile_space_bound()
1117 {
1118     int max;
1119     // find the highest page in use
1120     for (max = FIRST_VARYOBJ_PAGE-1 ; max >= 0 ; --max)
1121         if (fixedobj_pages[max].attr.parts.obj_size)
1122             break;
1123     max_used_fixedobj_page = max; // this is a page index, not the number of pages.
1124     SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER)->value =
1125         IMMOBILE_SPACE_START + IMMOBILE_CARD_BYTES*(1+max);
1126 
1127     for (max = (IMMOBILE_SPACE_SIZE/IMMOBILE_CARD_BYTES)-1 ;
1128          max >= FIRST_VARYOBJ_PAGE ; --max)
1129         if (varyobj_page_gens_augmented(max))
1130             break;
1131      max_used_varyobj_page = max; // this is a page index, not the number of pages.
1132 }
1133 
1134 // TODO: (Maybe this won't work. Not sure yet.) rather than use the
1135 // same 'raise' concept as in gencgc, each immobile object can store bits
1136 // indicating whether it has survived any GC at its current generation.
1137 // If it has, then it gets promoted next time, rather than all or nothing
1138 // being promoted from the generation getting collected.
1139 void
sweep_immobile_space(int raise)1140 sweep_immobile_space(int raise)
1141 {
1142   gc_assert(immobile_scav_queue_count == 0);
1143   sweep_fixedobj_pages(raise);
1144   sweep_varyobj_pages(raise);
1145   compute_immobile_space_bound();
1146 }
1147 
gc_init_immobile()1148 void gc_init_immobile()
1149 {
1150 #ifdef DEBUG
1151     logfile = stderr;
1152 #endif
1153     int n_fixedobj_pages = FIRST_VARYOBJ_PAGE;
1154     int n_varyobj_pages = (IMMOBILE_SPACE_SIZE - IMMOBILE_FIXEDOBJ_SUBSPACE_SIZE)
1155                           / IMMOBILE_CARD_BYTES;
1156     fixedobj_pages = calloc(n_fixedobj_pages, sizeof(struct fixedobj_page));
1157     gc_assert(fixedobj_pages);
1158 
1159     n_bitmap_elts = (n_varyobj_pages + 31) / 32;
1160     int request = n_bitmap_elts * sizeof (int)
1161                 + n_varyobj_pages * (sizeof (short)+sizeof (char));
1162     char* varyobj_page_tables = malloc(request);
1163     gc_assert(varyobj_page_tables);
1164     memset(varyobj_page_tables, 0, request);
1165     varyobj_page_touched_bits = (unsigned int*)varyobj_page_tables;
1166     // The conservative value for 'touched' is 1.
1167     memset(varyobj_page_touched_bits, 0xff, n_bitmap_elts * sizeof (int));
1168     varyobj_page_scan_start_offset = (unsigned short*)(varyobj_page_touched_bits + n_bitmap_elts);
1169     varyobj_page_header_gens = (unsigned char*)(varyobj_page_scan_start_offset + n_varyobj_pages);
1170 }
1171 
1172 /* Because the immobile page table is not dumped into a core image,
1173    we have to reverse-engineer the characteristics of each page,
1174    which means figuring out what the object spacing should be.
1175    This is not difficult, but is a bit of a kludge */
1176 
// Deduce the object spacing (in words) for the page holding 'obj',
// given that the immobile page table is not saved in a core image.
// A LAYOUT is recognized by its layout being the layout-of-layout,
// and layout pages use fixed 256-byte spacing; every other page
// spaces objects by their actual size.
static inline int immobile_obj_spacing(lispobj header_word, lispobj *obj,
                                       int actual_size)
{
  // The 64-bit build does not strictly need 256-byte-aligned layouts,
  // but keeps them as a proof of concept for the 32-bit build, which
  // would require the alignment with compact instance headers.
  if (widetag_of(header_word) == INSTANCE_HEADER_WIDETAG) {
      lispobj this_layout = instance_layout(obj);
      lispobj layout_of_layout = instance_layout(native_pointer(this_layout));
      if (this_layout == layout_of_layout)
          return 256 / N_WORD_BYTES; // spacing in words
  }
  return actual_size; // in words
}
1195 
1196 // Set the characteristics of each used page at image startup time.
void immobile_space_coreparse(uword_t address, uword_t len)
{
    int n_pages, word_idx, page;

    n_pages = (len + IMMOBILE_CARD_BYTES - 1) / IMMOBILE_CARD_BYTES;
    if (address == IMMOBILE_SPACE_START) {
        // Fixed-size-object subspace: each page's size/alignment
        // attributes are reverse-engineered from the first object
        // header found on the page.
        for (page = 0 ; page < n_pages ; ++page) {
            lispobj* page_data = low_page_address(page);
            for (word_idx = 0 ; word_idx < WORDS_PER_PAGE ; ++word_idx) {
                lispobj* obj = page_data + word_idx;
                lispobj header = *obj;
                if (!fixnump(header)) {
                    gc_assert(other_immediate_lowtag_p(*obj));
                    fixedobj_pages[page].attr.parts.obj_size
                        = sizetab[widetag_of(header)](obj);
                    fixedobj_pages[page].attr.parts.obj_align
                        = immobile_obj_spacing(header, obj,
                                               fixedobj_pages[page].attr.parts.obj_size);
                    fixedobj_pages[page].attr.parts.flags = WRITE_PROTECT;
                    fixedobj_pages[page].gens |= 1 << __immobile_obj_gen_bits(obj);
                    // One header suffices to characterize the page.
                    break;
                }
            }
        }
    } else if (address == IMMOBILE_VARYOBJ_SUBSPACE_START) {
        // Variable-size-object subspace: walk every object to rebuild
        // the hole chain, the per-page gen masks, and scan-start offsets.
        lispobj* obj = (lispobj*)address;
        lispobj* limit = (lispobj*)(address + len);
        int n_words;
        low_page_index_t last_page = 0;
        for ( ; obj < limit ; obj += n_words ) {
            n_words = sizetab[widetag_of(*obj)](obj);
            // A zero second word marks the end of the saved data:
            // either a bare padding instance header, or all zeros.
            if (obj[1] == 0 && (obj[0] == INSTANCE_HEADER_WIDETAG ||
                                obj[0] == 0)) {
                if (obj[0]) {
                    // Round up to the next immobile page.
                    lispobj page_end = CEILING((lispobj)obj, IMMOBILE_CARD_BYTES);
                    n_words = (lispobj*)page_end - obj;
                    obj[0] = SIMPLE_ARRAY_FIXNUM_WIDETAG;
                    obj[1] = make_fixnum(n_words - 2);
                } else {
                    // There are trailing zeros to fill the core file page.
                    // This happens when the next object is exactly aligned
                    // to an immobile page. There is no padding object.
                    gc_assert(((lispobj)obj & (IMMOBILE_CARD_BYTES-1)) == 0);
                }
                limit = obj;
                break;
            }
            if (immobile_filler_p(obj)) {
                // Holes were chained through the debug_info slot at save.
                // Just update the head of the chain.
                varyobj_holes = (lispobj)obj;
                continue;
            }
            low_page_index_t first_page = find_immobile_page_index(obj);
            last_page = find_immobile_page_index(obj+n_words-1);
            // Only the page with this object header gets a bit in its gen mask.
            VARYOBJ_PAGE_GENS(first_page) |= 1<<__immobile_obj_gen_bits(obj);
            // For each page touched by this object, set the page's
            // scan_start_offset, unless it was already set.
            int page;
            for (page = first_page ; page <= last_page ; ++page) {
                if (!varyobj_page_scan_start_offset[page - FIRST_VARYOBJ_PAGE]) {
                    long offset = (char*)low_page_address(page+1) - (char*)obj;
                    varyobj_page_scan_start_offset[page - FIRST_VARYOBJ_PAGE]
                        = offset >> (WORD_SHIFT + 1);
                }
            }
        }
        // Write-protect the pages occupied by the core file.
        // (There can be no inter-generation pointers.)
        int page;
        for (page = FIRST_VARYOBJ_PAGE ; page <= last_page ; ++page)
          varyobj_page_touched_bits[(page-FIRST_VARYOBJ_PAGE)/32] &= ~(1<<(page & 31));
        SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value = (lispobj)limit;
        compute_immobile_space_bound();
        write_protect_immobile_space();
    } else {
        lose("unknown immobile subspace");
    }
}
1278 
1279 // Demote pseudo-static to highest normal generation
1280 // so that all objects become eligible for collection.
void prepare_immobile_space_for_final_gc()
{
    int page;
    char* page_base;
    char* page_end = (char*)SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER)->value;

    // The list of holes need not be saved.
    SYMBOL(IMMOBILE_FREELIST)->value = NIL;

    // Fixed-size-object subspace: demote each pseudo-static object,
    // then swap the corresponding bit in the page's gen mask.
    for (page_base = (char*)IMMOBILE_SPACE_START, page = 0 ;
         page_base < page_end ;
         page_base += IMMOBILE_CARD_BYTES, ++page) {
        unsigned char mask = fixedobj_pages[page].gens;
        if (mask & 1<<PSEUDO_STATIC_GENERATION) {
            int obj_spacing = page_obj_align(page) << WORD_SHIFT;
            lispobj* obj = (lispobj*)page_base;
            lispobj* limit = (lispobj*)(page_base + IMMOBILE_CARD_BYTES - obj_spacing);
            for ( ; obj <= limit ; obj = (lispobj*)((char*)obj + obj_spacing) ) {
                // Skip holes (fixnum in the header position).
                if (!fixnump(*obj)
                    && __immobile_obj_gen_bits(obj) == PSEUDO_STATIC_GENERATION)
                    assign_generation(obj, HIGHEST_NORMAL_GENERATION);
            }
            fixedobj_pages[page].gens = (mask & ~(1<<PSEUDO_STATIC_GENERATION))
                                        | 1<<HIGHEST_NORMAL_GENERATION;
        }
    }

    // Variable-size-object subspace: demote object by object,
    // then fix up the gen mask of every used page.
    lispobj* obj = (lispobj*)IMMOBILE_VARYOBJ_SUBSPACE_START;
    lispobj* limit = (lispobj*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value;
    for ( ; obj < limit ; obj += sizetab[widetag_of(*obj)](obj) ) {
        if (__immobile_obj_gen_bits(obj) == PSEUDO_STATIC_GENERATION)
            assign_generation(obj, HIGHEST_NORMAL_GENERATION);
    }
    int max_page = find_immobile_page_index(limit-1);
    for ( page = FIRST_VARYOBJ_PAGE ; page <= max_page ; ++page ) {
        int mask = VARYOBJ_PAGE_GENS(page);
        if (mask & (1<<PSEUDO_STATIC_GENERATION)) {
            VARYOBJ_PAGE_GENS(page) = (mask & ~(1<<PSEUDO_STATIC_GENERATION))
                                      | 1<<HIGHEST_NORMAL_GENERATION;
        }
    }
}
1323 
// Now once again promote all objects to pseudo-static just prior to save.
// 'coreparse' makes all pages in regular dynamic space pseudo-static.
// But since immobile objects store their generation, it must be done at save,
// or else it would have to be done on image restart
// which would require writing to a lot of pages for no reason.
void prepare_immobile_space_for_save()
{
    int page;
    char *page_base;
    char* page_end = (char*)SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER)->value;

    // Fixed-size object subspace: promote every live object on every
    // in-use page. (page_obj_align() is 0 for a page never allocated to.)
    for (page_base = (char*)IMMOBILE_SPACE_START, page = 0 ;
         page_base < page_end ;
         page_base += IMMOBILE_CARD_BYTES, ++page) {
        int obj_spacing = page_obj_align(page) << WORD_SHIFT;
        if (obj_spacing) {
            lispobj* obj = (lispobj*)page_base;
            // 'limit' is the last possible object start on this page.
            lispobj* limit = (lispobj*)(page_base + IMMOBILE_CARD_BYTES - obj_spacing);
            for ( ; obj <= limit ; obj = (lispobj*)((char*)obj + obj_spacing) ) {
                // A fixnum header word marks an unused cell - skip it.
                if (!fixnump(*obj))
                    assign_generation(obj, PSEUDO_STATIC_GENERATION);
            }
        }
    }
    // Varyobj subspace: promote live objects. Filler (hole) objects are
    // chained onto 'varyobj_holes' through their debug_info slot, and
    // their unused payload is zeroed so no stale data lands in the core.
    lispobj* obj = (lispobj*)IMMOBILE_VARYOBJ_SUBSPACE_START;
    lispobj* limit = (lispobj*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value;
    for ( varyobj_holes = 0 ;  obj < limit ; obj += sizetab[widetag_of(*obj)](obj) ) {
        if (immobile_filler_p(obj)) {
            struct code* code  = (struct code*)obj;
            code->debug_info = varyobj_holes;
            varyobj_holes    = (lispobj)code;
            // 0-fill the unused space.
            int nwords = sizetab[widetag_of(*obj)](obj);
            memset(code->constants, 0,
                   (nwords * N_WORD_BYTES) - offsetof(struct code, constants));
        } else
            assign_generation(obj, PSEUDO_STATIC_GENERATION);
    }
    if ((lispobj)limit & (IMMOBILE_CARD_BYTES-1)) { // Last page is partially used.
        gc_assert(*limit == SIMPLE_ARRAY_FIXNUM_WIDETAG);
        // Write an otherwise illegal object at the free pointer.
        limit[0] = INSTANCE_HEADER_WIDETAG; // 0 payload length
        limit[1] = 0; // no layout
    }
}
1369 
1370 //// Interface
1371 
// Handle a write-protection fault at 'fault_addr'.
// Returns 1 if the address lies in immobile space (the containing card is
// unprotected and the page marked touched/WP-cleared), 0 if it is not ours.
int immobile_space_handle_wp_violation(void* fault_addr)
{
    low_page_index_t page_index = find_immobile_page_index(fault_addr);
    if (page_index < 0) return 0;

    // Unprotect the entire card containing the faulting address.
    os_protect((os_vm_address_t)((lispobj)fault_addr & ~(IMMOBILE_CARD_BYTES-1)),
               IMMOBILE_CARD_BYTES, OS_VM_PROT_ALL);
    if (page_index >= FIRST_VARYOBJ_PAGE) {
        // The free pointer can move up or down. Attempting to insist that a WP
        // fault not occur above the free pointer (plus some slack) is not
        // threadsafe, so allow it anywhere. More strictness could be imparted
        // by tracking the max value attained by the free pointer.
        // (Atomic OR: other threads may fault on pages in the same word.)
        __sync_or_and_fetch(&varyobj_page_touched_bits[(page_index-FIRST_VARYOBJ_PAGE)/32],
                            1 << (page_index & 31));
    } else {
        // FIXME: a single bitmap of touched bits would make more sense,
        // and the _CLEARED flag doesn't achieve much if anything.
        if (!(fixedobj_pages[page_index].attr.parts.flags
              & (WRITE_PROTECT|WRITE_PROTECT_CLEARED)))
            return 0;
        SET_WP_FLAG(page_index, WRITE_PROTECT_CLEARED);
    }
    return 1;
}
1396 
// Find the object that encloses pointer.
// Returns the object's base address, or NULL if 'pointer' is outside
// immobile space or does not fall within any allocated object.
lispobj *
search_immobile_space(void *pointer)
{
    lispobj *start;

    if ((lispobj)pointer >= IMMOBILE_SPACE_START
        && (lispobj)pointer < SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value) {
        low_page_index_t page_index = find_immobile_page_index(pointer);
        if ((lispobj)pointer >= IMMOBILE_VARYOBJ_SUBSPACE_START) {
            // Variable-size subspace: scan forward from the page's
            // recorded scan start until the enclosing object is found.
            start = (lispobj*)varyobj_scan_start(page_index);
            if (start > (lispobj*)pointer) return NULL;
            return (gc_search_space(start,
                                    (((lispobj*)pointer)+2)-start,
                                    (lispobj*)pointer));
        } else if ((lispobj)pointer < SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER)->value) {
            // Fixed-size subspace: object positions are computable from
            // the page's uniform object spacing; no scan needed.
            char *page_base = (char*)((lispobj)pointer & ~(IMMOBILE_CARD_BYTES-1));
            int spacing = page_obj_align(page_index) << WORD_SHIFT;
            int index = ((char*)pointer - page_base) / spacing;
            char *begin = page_base + spacing * index;
            char *end = begin + (page_obj_size(page_index) << WORD_SHIFT);
            // A pointer into the alignment padding beyond the object's
            // actual size does not count as being inside the object.
            if ((char*)pointer < end) return (lispobj*)begin;
        }

    }
    return NULL;
}
1424 
// For coalescing holes, we need to scan backwards, which is done by
// looking backwards for a page that contains the start of a
// block of objects one of which must abut 'obj'.
// Returns the object immediately preceding 'obj', or 0 if 'obj' is the
// first object in the varyobj subspace.
lispobj* find_preceding_object(lispobj* obj)
{
  int page = find_immobile_page_index(obj);
  while (1) {
      int offset = varyobj_page_scan_start_offset[page - FIRST_VARYOBJ_PAGE];
      if (offset) { // 0 means the page is empty.
          lispobj* start = varyobj_scan_start(page);
          if (start < obj) { // Scan from here forward
              while (1) {
                  lispobj* end = start + sizetab[widetag_of(*start)](start);
                  // 'end' is the start of the next object; when it lands
                  // exactly on 'obj', 'start' is the predecessor.
                  if (end == obj) return start;
                  gc_assert(end < obj);
                  start = end;
              }
          }
      }
      // This page is empty, or its scan start is not below 'obj':
      // keep looking on earlier pages.
      if (page == FIRST_VARYOBJ_PAGE) {
          gc_assert(obj == low_page_address(FIRST_VARYOBJ_PAGE));
          return 0; // Predecessor does not exist
      }
      --page;
  }
}
1451 
1452 #include "genesis/vector.h"
1453 #include "genesis/instance.h"
// Allocate a LAYOUT instance in the fixed-size object subspace.
// 'layout_layout' is the layout-of-layouts; 'slots' is a simple-vector
// supplying the new layout's slot values, whose length must match
// LAYOUT_SIZE less the header (and, if present, the layout) word.
lispobj alloc_layout(lispobj layout_layout, lispobj slots)
{
    struct vector* v = (struct vector*)native_pointer(slots);
    // If INSTANCE_DATA_START is 0, subtract 1 word for the header.
    // If 1, subtract 2 words: 1 for the header and 1 for the layout.
    if (fixnum_value(v->length) != (LAYOUT_SIZE - INSTANCE_DATA_START - 1))
        lose("bad arguments to alloc_layout");
    struct instance* l = (struct instance*)
      alloc_immobile_obj(MAKE_ATTR(256 / N_WORD_BYTES, // spacing: one layout per 256 bytes
                                   CEILING(LAYOUT_SIZE,2), // size
                                   0),
#ifdef LISP_FEATURE_COMPACT_INSTANCE_HEADER
                         // The layout-of-layouts lives in the header's upper half.
                         (layout_layout << 32) |
#endif
                         (LAYOUT_SIZE-1)<<8 | INSTANCE_HEADER_WIDETAG,
                         &layout_page_hint);
#ifndef LISP_FEATURE_COMPACT_INSTANCE_HEADER
    l->slots[0] = layout_layout;
#endif
    memcpy(&l->slots[INSTANCE_DATA_START], v->data,
           (LAYOUT_SIZE - INSTANCE_DATA_START - 1)*N_WORD_BYTES);

    // Possible efficiency win: make the "wasted" bytes after the layout into a
    // simple unboxed array so that heap-walking can skip in one step.
    // Probably only a performance issue for MAP-ALLOCATED-OBJECTS,
    // since scavenging knows to skip by the object alignment anyway.
    return (lispobj)l | INSTANCE_POINTER_LOWTAG;
}
1482 
1483 #include "genesis/symbol.h"
// Allocate a SYMBOL named 'name' in the fixed-size object subspace.
// 'kind' is stored in the page attributes (see comment below).
// The symbol starts out unbound and packageless.
lispobj alloc_sym(lispobj name, int kind)
{
    // In case we want different kinds of symbol pages (as was the hope)
    // to keep special variables apart from random trash.
    // i.e. symbols whose page is often written versus symbols
    // that exist only as monikers. This would minimize the number
    // of different pages that become touched between GC cycles.
    int* hint = &symbol_page_hint;
    struct symbol* s = (struct symbol*)
      alloc_immobile_obj(MAKE_ATTR(CEILING(SYMBOL_SIZE,2), // spacing
                                   CEILING(SYMBOL_SIZE,2), // size
                                   kind),
                         (SYMBOL_SIZE-1)<<8 | SYMBOL_HEADER_WIDETAG,
                         hint);
    s->value = UNBOUND_MARKER_WIDETAG;
    s->hash = 0;
    s->info = NIL;
    s->name = name;
    s->package = NIL;
    return (lispobj)s | OTHER_POINTER_LOWTAG;
}
1505 
1506 #include "genesis/fdefn.h"
// Allocate an FDEFN (function-definition holder) for 'name' in the
// fixed-size object subspace, initially with no function bound.
lispobj alloc_fdefn(lispobj name)
{
    struct fdefn* f = (struct fdefn*)
      alloc_immobile_obj(MAKE_ATTR(CEILING(FDEFN_SIZE,2), // spacing
                                   CEILING(FDEFN_SIZE,2), // size
                                   0),
                         (FDEFN_SIZE-1)<<8 | FDEFN_WIDETAG,
                         &fdefn_page_hint);
    f->name = name;
    f->fun = NIL;
    f->raw_addr = 0;
    return (lispobj)f | OTHER_POINTER_LOWTAG;
}
1520 
1521 #ifdef LISP_FEATURE_IMMOBILE_CODE
1522 //// Defragmentation
1523 
1524 /// It's tricky to try to use the scavenging functions
1525 /// for fixing up moved code. There are a few reasons:
1526 /// - we need to rewrite the space on top of itself
1527 /// - we store forwarding pointers outside of the space
1528 /// - we'd want to modify the transport functions
1529 ///   to deliberately fail in case one got called by mistake.
1530 /// So the approach is to basically do a large switch
1531 /// over all possible objects that we might need to fixup.
1532 /// There are some other strategies, none of which seem to
1533 /// make things obviously easier, such as:
1534 /// * variation (A)
///   Copy the whole space to a shadow space,
1536 ///   deposit FPs in the real space but perform fixups
1537 ///   in the shadow space; then copy it back.
1538 ///   At least one problem here is that the chain of
1539 ///   pointers in simple-funs in the shadow space
1540 ///   has to compensate for their temporary address.
1541 /// * variation (B)
1542 ///   First permute all code into the shadow space,
1543 ///   copy it back, then fix it up. This is bad
1544 ///   because we can't figure out original jump targets
1545 ///   unless we have a reverse forwarding-pointer map.
1546 
1547 static char* tempspace;
1548 
// Scan the 'n_words' words at 'where', rewriting any tagged pointer into
// the varyobj subspace with the forwarding pointer previously stashed at
// the corresponding offset in 'tempspace'.
static void adjust_words(lispobj *where, sword_t n_words)
{
    sword_t i;
    for (i = 0; i < n_words; ++i) {
        lispobj word = where[i];
        if (!is_lisp_pointer(word) || !immobile_space_p(word)
            || word < IMMOBILE_VARYOBJ_SUBSPACE_START)
            continue;
        // tempspace mirrors the varyobj subspace word-for-word.
        int offset = (lispobj)native_pointer(word) - IMMOBILE_VARYOBJ_SUBSPACE_START;
        lispobj forwarded = *(lispobj*)(tempspace + offset);
        gc_assert(forwarded);
        where[i] = forwarded;
    }
}
1564 
// Forward a raw function entry-point address. Addresses at or below the
// end of read-only space are returned unchanged (an fdefn trampoline -
// that code does not move).
static lispobj adjust_fun_entry(lispobj raw_entry)
{
    if (raw_entry <= READ_ONLY_SPACE_END)
        return raw_entry; // for fdefn which has a tramp
    // Recover the tagged simple-fun pointer, forward it through the
    // stashed FPs, then re-derive the raw entry address.
    lispobj tagged_fun = raw_entry - FUN_RAW_ADDR_OFFSET;
    adjust_words(&tagged_fun, 1);
    return tagged_fun + FUN_RAW_ADDR_OFFSET;
}
1574 
// Walk 'n_words' words of heap starting at 'where', adjusting every
// pointer to moved varyobj-space code (via adjust_words/adjust_fun_entry).
// A large switch over widetags is used instead of the scavenge functions
// because the fixups are applied in place - see the rationale above.
static void fixup_space(lispobj* where, size_t n_words)
{
    lispobj* end = where + n_words;
    lispobj header_word;
    int widetag;
    long size;

    while (where < end) {
        header_word = *where;
        if (is_lisp_pointer(header_word) || is_lisp_immediate(header_word)) {
            adjust_words(where, 2); // A cons.
            where += 2;
            continue;
        }
        widetag = widetag_of(header_word);
        size = sizetab[widetag](where);
        switch (widetag) {
        default:
          // Entirely unboxed objects need no fixups, but any widetag
          // we don't recognize as unboxed is a bug.
          if (!(widetag <= COMPLEX_DOUBLE_FLOAT_WIDETAG
                || widetag == SAP_WIDETAG // Better not point to code!
                || widetag == SIMD_PACK_WIDETAG
                || unboxed_array_p(widetag)))
            lose("Unhandled widetag in fixup_range: %p\n", (void*)header_word);
          break;
        case INSTANCE_HEADER_WIDETAG:
          // Adjust only tagged slots, as directed by the instance's layout.
          instance_scan_interleaved(adjust_words, where+1,
                                    instance_length(header_word) | 1,
                                    native_pointer(instance_layout(where)));
          break;
        case CODE_HEADER_WIDETAG:
          // Fixup the constant pool.
          adjust_words(where+1, code_header_words(header_word)-1);
          // Fixup all embedded simple-funs
          for_each_simple_fun(i, f, (struct code*)where, 1, {
              f->self = adjust_fun_entry(f->self);
              adjust_words(SIMPLE_FUN_SCAV_START(f), SIMPLE_FUN_SCAV_NWORDS(f));
          });
          break;
        case CLOSURE_HEADER_WIDETAG:
          // where[1] is the raw entry point of the underlying function.
          where[1] = adjust_fun_entry(where[1]);
          // Fallthrough intended.
        case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
          // skip the trampoline word at where[1]
          adjust_words(where+2, (HeaderValue(header_word)&0xFFFF) - 1);
          break;
        case FDEFN_WIDETAG:
          // name and fun are boxed; raw_addr (where[3]) is an entry point.
          adjust_words(where+1, 2);
          where[3] = adjust_fun_entry(where[3]);
          break;

        // All the array header widetags.
        case SIMPLE_VECTOR_WIDETAG:
        case SIMPLE_ARRAY_WIDETAG:
#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
        case COMPLEX_CHARACTER_STRING_WIDETAG:
#endif
        case COMPLEX_BASE_STRING_WIDETAG:
        case COMPLEX_VECTOR_NIL_WIDETAG:
        case COMPLEX_BIT_VECTOR_WIDETAG:
        case COMPLEX_VECTOR_WIDETAG:
        case COMPLEX_ARRAY_WIDETAG:
        // And the other entirely boxed objects.
        case SYMBOL_HEADER_WIDETAG:
        case VALUE_CELL_HEADER_WIDETAG:
        case WEAK_POINTER_WIDETAG:
        case RATIO_WIDETAG:
        case COMPLEX_WIDETAG:
          // Use the sizing functions for generality.
          // Symbols can contain strange header bytes,
          // and vectors might have a padding word, etc.
          adjust_words(where+1, size-1);
          break;
        }
        where += size;
    }
}
1651 
1652 extern void
1653 walk_generation(void (*proc)(lispobj*,size_t),
1654                 generation_index_t generation);
1655 
1656 // Both pointers are untagged.
set_load_address(lispobj * old,lispobj new)1657 static void set_load_address(lispobj* old, lispobj new)
1658 {
1659     int offset_in_space = (lispobj)old - IMMOBILE_VARYOBJ_SUBSPACE_START;
1660     lispobj* fp_loc = (lispobj*)(tempspace + offset_in_space);
1661     *fp_loc = new;
1662 }
1663 // Take and return an untagged code pointer.
get_load_address(lispobj * old)1664 static lispobj get_load_address(lispobj* old)
1665 {
1666     int offset_in_space = (lispobj)old - IMMOBILE_VARYOBJ_SUBSPACE_START;
1667     lispobj* fp_loc = (lispobj*)(tempspace + offset_in_space);
1668     return *fp_loc;
1669 }
1670 
1671 int* immobile_space_reloc_index;
1672 int* immobile_space_relocs;
1673 
// Compact the varyobj (code) subspace: compute new addresses for all live
// code components, fix every pointer to moved code throughout the heap
// (and JMP/CALL displacements on x86-64), then slide the components down.
// 'components' is a 0-terminated array of pairs: even index = tagged code
// pointer, odd index = its new address (filled in here; 0 for fillers).
// Both 'components' and the global reloc tables are freed here.
void defrag_immobile_space(int* components)
{
    long total_size = 0;
    lispobj* addr;
    int i;

    // Compute where each code component will be moved to.
    for (i=0 ; components[i*2] ; ++i) {
        addr = (lispobj*)(long)components[i*2];
        gc_assert(lowtag_of((lispobj)addr) == OTHER_POINTER_LOWTAG);
        addr = native_pointer((lispobj)addr);
        int widetag = widetag_of(*addr);
        lispobj new_vaddr = 0;
        // FIXME: generalize
        gc_assert(widetag == CODE_HEADER_WIDETAG);
        if (!immobile_filler_p(addr)) {
            // Live components pack densely from the subspace start.
            new_vaddr = IMMOBILE_VARYOBJ_SUBSPACE_START + total_size;
            total_size += sizetab[widetag](addr) << WORD_SHIFT;
        }
        components[i*2+1] = new_vaddr;
    }
    // tempspace is the old total size, not the new total size,
    // because forwarding pointers are stashed there prior to defrag.
    // (It's a perfect hashtable by any other name.)
    size_t tempspace_bytes = (SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value
                             - IMMOBILE_VARYOBJ_SUBSPACE_START);
    tempspace = calloc(tempspace_bytes, 1);

    // Deposit forwarding pointers into the temp space, for each component
    // and for each of its embedded simple-funs.
    lispobj new_vaddr;
    for (i=0 ; components[i*2] ; ++i) {
        if ((new_vaddr = components[i*2+1]) != 0) {
            addr = native_pointer(components[i*2]);
            int displacement = new_vaddr - (lispobj)addr;
            set_load_address(addr, new_vaddr);
            // FIXME: what is the 'if' for? Doesn't it _have_ to be code?
            if (widetag_of(*addr) == CODE_HEADER_WIDETAG)
                for_each_simple_fun(index, fun, (struct code*)addr, 1, {
                    set_load_address((lispobj*)fun,
                                     make_lispobj((char*)fun + displacement,
                                                  FUN_POINTER_LOWTAG));
                });
        }
    }

#ifdef LISP_FEATURE_X86_64
    // Fix displacements in JMP and CALL instructions
    for (i = 0 ; immobile_space_reloc_index[i*2] ; ++i) {
        lispobj code      = immobile_space_reloc_index[i*2] - OTHER_POINTER_LOWTAG;
        lispobj load_addr = 0;
        if (code >= READ_ONLY_SPACE_START && code < READ_ONLY_SPACE_END)
            load_addr = code; // This code can not be moved or GCed.
        else
            load_addr = get_load_address((lispobj*)code);
        if (load_addr) { // Skip any code that was dropped by GC.
            int reloc_index     = immobile_space_reloc_index[i*2+1];
            int end_reloc_index = immobile_space_reloc_index[i*2+3];
            for ( ; reloc_index < end_reloc_index ; ++reloc_index ) {
                unsigned char* inst_addr = (unsigned char*)(long)immobile_space_relocs[reloc_index];
                // 0xE8 = CALL rel32, 0xE9 = JMP rel32.
                gc_assert(*inst_addr == 0xE8 || *inst_addr == 0xE9);
                // Absolute target = address of next instruction (+5) + rel32.
                unsigned int target_addr = (int)(long)inst_addr + 5 + *(int*)(inst_addr+1);
                int target_adjust = 0;
                // If the call target itself moved, the displacement must
                // absorb the target's movement as well as the caller's.
                if (target_addr >= IMMOBILE_VARYOBJ_SUBSPACE_START && target_addr < IMMOBILE_SPACE_END) {
                    lispobj* ptarg_fun_header =
                      (lispobj*)(target_addr - offsetof(struct simple_fun, code));
                    gc_assert(widetag_of(*ptarg_fun_header) == SIMPLE_FUN_HEADER_WIDETAG);
                    lispobj* ptarg_code_header =
                      ptarg_fun_header - HeaderValue(*ptarg_fun_header);
                    gc_assert(widetag_of(*ptarg_code_header) == CODE_HEADER_WIDETAG);
                    lispobj targ_load_addr = get_load_address(ptarg_code_header);
                    gc_assert(targ_load_addr); // was not discarded
                    target_adjust = targ_load_addr - (lispobj)ptarg_code_header;
                }
                *(int*)(inst_addr+1) += target_adjust + ((lispobj)code - load_addr);
            }
        }
    }
#endif
    free(immobile_space_relocs);
    free(immobile_space_reloc_index);

    // Fix Lisp pointers in static, immobile, and dynamic spaces
    fixup_space((lispobj*)STATIC_SPACE_START,
                (SYMBOL(STATIC_SPACE_FREE_POINTER)->value
                 - STATIC_SPACE_START) >> WORD_SHIFT);
    fixup_space((lispobj*)IMMOBILE_SPACE_START,
                (SYMBOL(IMMOBILE_FIXEDOBJ_FREE_POINTER)->value
                 - IMMOBILE_SPACE_START) >> WORD_SHIFT);
    fixup_space((lispobj*)IMMOBILE_VARYOBJ_SUBSPACE_START,
                (SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value
                 - IMMOBILE_VARYOBJ_SUBSPACE_START) >> WORD_SHIFT);
    walk_generation(fixup_space, -1);

    // Now permute the code components into the temp space
    // (overwriting the forwarding pointers, which are no longer needed).
    for (i=0 ; components[i*2] ; ++i) {
        if ((new_vaddr = components[i*2+1]) != 0) {
          addr = native_pointer(components[i*2]);
          char* to_addr = tempspace + ((char*)new_vaddr - (char*)IMMOBILE_VARYOBJ_SUBSPACE_START);
          size_t size = sizetab[widetag_of(*addr)](addr) << WORD_SHIFT;
          memcpy(to_addr, addr, size);
        }
    }
    // Copy the permuted space back where it belongs.
    memcpy((char*)IMMOBILE_VARYOBJ_SUBSPACE_START, tempspace, total_size);

    // Zero-fill the unused remainder of the immobile space
    lispobj free_ptr = IMMOBILE_VARYOBJ_SUBSPACE_START + total_size;
    lispobj old_free_ptr = SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value;
    bzero((char*)free_ptr, old_free_ptr - free_ptr);
    SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value = free_ptr;
    if (free_ptr & (IMMOBILE_CARD_BYTES-1)) { // unless page-aligned
        // Cap the partial page with an array header spanning the rest
        // so that linear heap walks see a valid object there.
        int remainder = IMMOBILE_CARD_BYTES - (free_ptr & (IMMOBILE_CARD_BYTES-1));
        ((lispobj*)free_ptr)[0] = SIMPLE_ARRAY_FIXNUM_WIDETAG;
        ((lispobj*)free_ptr)[1] = make_fixnum((remainder >> WORD_SHIFT) - 2);
    }

    free(tempspace);
    free(components);
}
1793 #endif
1794 
// Debugging check: assert that no untouched (write-protected) varyobj
// page contains an object with an old->young pointer, i.e. that the
// touched bits are conservative with respect to keep_gen/new_gen.
void verify_immobile_page_protection(int keep_gen, int new_gen)
{
  low_page_index_t page;
  lispobj* end = (lispobj*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value;
  low_page_index_t end_page = find_immobile_page_index((char*)end-1);
  lispobj* obj;

  for (page = FIRST_VARYOBJ_PAGE; page <= end_page; ++page) {
    if (!varyobj_page_touched(page)) {
      lispobj* page_begin = low_page_address(page);
      lispobj* page_end = page_begin + WORDS_PER_PAGE;
      // Assert that there are no old->young pointers.
      obj = varyobj_scan_start(page);
      // Never scan past the free pointer.
      // FIXME: It is supposed to work to scan past the free pointer
      // on the last page, but the allocator needs to plop an array header there,
      // and sometimes it doesn't.
      lispobj* varyobj_free_ptr = (lispobj*)(SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value);
      if (page_end > varyobj_free_ptr) page_end = varyobj_free_ptr;
      for ( ; obj < page_end ; obj += sizetab[widetag_of(*obj)](obj) ) {
        if (!immobile_filler_p(obj)
            && varyobj_points_to_younger_p(obj, __immobile_obj_gen_bits(obj),
                                           keep_gen, new_gen,
                                           (char*)page_begin, (char*)page_end))
          lose("page WP bit on page %d is wrong\n", page);
      }
    }
  }
}
1824 
1825 #ifdef VERIFY_PAGE_GENS
void check_fixedobj_page(int page)
{
  // Every page should have a 'gens' mask which exactly reflects
  // the aggregate over all objects on that page. Verify that invariant,
  // checking all pages, not just the ones below the free pointer.
  int genmask, obj_size, obj_spacing, i, all_ok = 1;
  lispobj *obj, *limit, header;
  int sees_younger = 0;

  obj_size = page_obj_size(page);
  obj_spacing = page_obj_align(page);
  obj = low_page_address(page);
  limit = obj + WORDS_PER_PAGE - obj_spacing;
  genmask = 0;
  // A page that was never allocated to (obj_size 0) must be all zeros
  // and carry an empty generation mask.
  if (obj_size == 0) {
      for (i=0; i<WORDS_PER_PAGE; ++i)
        gc_assert(obj[i]==0);
      gc_assert(fixedobj_pages[page].gens ==0);
      return;
  }
  // Accumulate the actual generation mask over all live objects,
  // and note whether any object points to a younger generation.
  for ( ; obj <= limit ; obj += obj_spacing ) {
      header = *obj;
      if (!fixnump(header)) { // skip holes, whose header is a fixnum
          int gen = __immobile_obj_gen_bits(obj);
          gc_assert(0 <= gen && gen <= PSEUDO_STATIC_GENERATION);
          genmask |= 1<<gen;
          if (fixedobj_points_to_younger_p(obj, obj_size, gen, 0xff, 0xff))
            sees_younger = 1;
      }
  }
  // It's not wrong if the gen0 bit is set spuriously, but it should only
  // happen at most once, on the first GC after image startup.
  // At all other times, the invariant should hold that if the freelist
  // indicated that space was available, and the new pointer differs,
  // then some gen0 object exists on the page.
  // The converse is true because of pseudo-atomicity of the allocator:
  // if some thread claimed a hole, then it also updated the freelist.
  // If it died before doing the latter, then the object allegedly created
  // was never really live, so won't contain any pointers.
  if (fixedobj_pages[page].gens != genmask
      && fixedobj_pages[page].gens != (genmask|1)) {
    fprintf(stderr, "Page #x%x @ %p: stored mask=%x actual=%x\n",
            page, low_page_address(page),
            fixedobj_pages[page].gens, genmask);
    all_ok = 0;
  }
  // A write-protected page must not contain old->young pointers.
  if (fixedobj_page_wp(page) && sees_younger) {
    fprintf(stderr, "Page #x%x @ %p: WP is wrong\n",
            page, low_page_address(page));
    all_ok = 0;
  }
  gc_assert(all_ok);
}
1879 
1880 int n_immobile_objects;
1881 int *immobile_objects, *immobile_objects_limit;
1882 
// Three-way comparator for ints (for bsearch over 'immobile_objects').
// Uses explicit comparisons rather than subtraction: subtracting two
// ints whose difference exceeds INT_MAX is signed-overflow UB, and only
// the sign of the result is meaningful to the callers anyway.
int comparator_eq(const void* a, const void* b) {
  int x = *(const int*)a, y = *(const int*)b;
  return (x > y) - (x < y);
}
1886 
// Find the largest item less than or equal.
// (useful for finding the object that contains a given pointer)
// NOTE: this comparator relies on 'b' pointing into the sorted
// 'immobile_objects' array, so that the neighboring element to the
// right of 'b' can be examined.
int comparator_le(const void* a, const void* b) {
  int diff = *(int*)a - *(int*)b;
  if (diff <= 0) return diff;
  // If looking to the right would see an item strictly greater
  // than the sought key, or there is nothing to the right,
  // then deem this an exact match.
  if (b == (void*)immobile_objects_limit || ((int*)b)[1] > *(int*)a) return 0;
  return 1;
}
1898 
// Find the smallest item greater than or equal.
// useful for finding the lowest item at or after a page base address.
// NOTE: this comparator relies on 'b' pointing into the sorted
// 'immobile_objects' array, so that the neighboring element to the
// left of 'b' can be examined.
int comparator_ge(const void* a, const void* b) {
  int diff = *(int*)a - *(int*)b;
  if (diff >= 0) return diff;
  // If looking to the left would see an item strictly less
  // than the sought key, or there is nothing to the left
  // then deem this an exact match.
  if (b == (void*)immobile_objects || ((int*)b)[-1] < *(int*)a) return 0;
  return -1;
}
1910 
check_varyobj_pages()1911 void check_varyobj_pages()
1912 {
1913   // 1. Check that a linear scan sees only valid object headers,
1914   //    and that it terminates exactly at IMMOBILE_CODE_FREE_POINTER.
1915   lispobj* obj = (lispobj*)IMMOBILE_VARYOBJ_SUBSPACE_START;
1916   lispobj* end = (lispobj*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value;
1917   low_page_index_t end_page = find_immobile_page_index((char*)end-1);
1918 
1919   n_immobile_objects = 0;
1920   while (obj < end) {
1921     lispobj word = *obj;
1922     gc_assert(other_immediate_lowtag_p(word));
1923     int n_words = sizetab[widetag_of(word)](obj);
1924     obj += n_words;
1925     ++n_immobile_objects;
1926   }
1927   gc_assert(obj == end);
1928 
1929   // 2. Check that all scan_start_offsets are plausible.
1930   // Begin by collecting all object header locations into an array;
1931   immobile_objects = calloc(n_immobile_objects, sizeof (lispobj));
1932   immobile_objects_limit = immobile_objects + n_immobile_objects - 1;
1933   obj = (lispobj*)IMMOBILE_VARYOBJ_SUBSPACE_START;
1934   int i = 0;
1935   while (obj < end) {
1936     immobile_objects[i++] = (lispobj)obj;
1937     lispobj word = *obj;
1938     int n_words = sizetab[widetag_of(word)](obj);
1939     obj += n_words;
1940   }
1941   // Check that each page's scan start is a known immobile object
1942   // and that it is the right object.
1943   low_page_index_t page;
1944   for (page = FIRST_VARYOBJ_PAGE; page <= end_page; ++page) {
    lispobj page_addr = (lispobj)low_page_address(page);
    // Locate the nearest recorded object addresses bracketing this page's
    // base: the highest object at-or-below it and the lowest at-or-above it.
    // NOTE(review): elements are searched with sizeof(int), so object
    // addresses appear to be stored truncated to 32 bits — confirm this is
    // intentional/safe on 64-bit builds. The comparators presumably perform
    // inexact (<= / >=) matching rather than bsearch's usual exact match.
    int* found_below = bsearch(&page_addr, immobile_objects, n_immobile_objects,
                                sizeof (int), comparator_le);
    int* found_above = bsearch(&page_addr, immobile_objects, n_immobile_objects,
                                sizeof (int), comparator_ge);
    int stored_scan_start = (int)(long)varyobj_scan_start(page);
    lispobj* scan_start_obj = (lispobj*)(long)*found_below;
    if (scan_start_obj != (lispobj*)(long)stored_scan_start) {
      //printf("page %d: found-below=%p stored=%p\n", page, scan_start_obj, stored_scan_start);
      // The stored scan start may point at a filler (hole) that precedes the
      // first real object; step over holes until we either match the stored
      // value, hit the space free pointer, or run off the end of the page.
      while (immobile_filler_p(scan_start_obj)) {
        int nwords = sizetab[widetag_of(*scan_start_obj)](scan_start_obj);
        //        printf("skipping %d words to %p\n", nwords, scan_start_obj + nwords);
        scan_start_obj += nwords;
        // the stored scan start does not guarantee that it points
        // to a non-hole; we only assert that it *probably* does not.
        // As such, when computing the "correct" value, we allow
        // any value in between the legal bounding values for it.
        if ((int)(long)scan_start_obj == stored_scan_start)
          break;
        // If you hit the free pointer, or run off the page,
        // then the page is completely empty.
        if (scan_start_obj == (lispobj*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value
            || scan_start_obj >= (lispobj*)low_page_address(page+1)) {
          scan_start_obj = low_page_address(page+1);
          break;
        }
      }
    }
    if (scan_start_obj != (lispobj*)(long)stored_scan_start)
      // NOTE(review): stored_scan_start and *found_below are 'int' but are
      // printed with %p, which requires void* — consider fixing the format
      // specifiers (or the operand types) here.
      lose("page %d: stored_scan_start=%p does not match found %p\n",
           page, stored_scan_start, *found_below);
    if (found_below != found_above) {
      // the object below must touch this page.
      // if it didn't, there should be a higher object below.
      lispobj* below = (lispobj*)(long)*found_below;
      int n_words = sizetab[widetag_of(*below)](below);
      lispobj* end = below + n_words;
      gc_assert(end > (lispobj*)page_addr);
    }
  }
  free(immobile_objects);

  // 3. The generation mask for each page is exactly the union
  //    of generation numbers of object headers on the page.
  for (page = FIRST_VARYOBJ_PAGE; page <= end_page; ++page) {
      if (!varyobj_page_scan_start_offset[page - FIRST_VARYOBJ_PAGE])
        continue; // page is all holes or never used
      obj = varyobj_scan_start(page);
      lispobj word = *obj;
      int n_words = sizetab[widetag_of(word)](obj);
      // Skip the first object if it doesn't start on this page.
      if (obj < (lispobj*)low_page_address(page)) obj += n_words;
      // Walk objects up to the end of the page or the space free pointer,
      // whichever comes first.
      lispobj* limit = (lispobj*)low_page_address(page) + WORDS_PER_PAGE;
      lispobj* freeptr = (lispobj*)SYMBOL(IMMOBILE_SPACE_FREE_POINTER)->value;
      if (limit > freeptr) limit = freeptr;
      int mask = 0;
      for ( ; obj < limit ; obj += sizetab[widetag_of(*obj)](obj) ) {
          int gen = __immobile_obj_gen_bits(obj);
          if (immobile_filler_p(obj)) {
              // Fillers (holes) must be generation 0 and do not contribute
              // to the page's generation mask.
              gc_assert(gen == 0);
          } else {
              gc_assert(0 <= gen && gen <= PSEUDO_STATIC_GENERATION);
              mask |= 1 << gen;
          }
          if (widetag_of(*obj) == CODE_HEADER_WIDETAG) {
              // Sanity-check the code object's linked chain of entry points:
              // each must be a tagged lisp pointer whose referent carries a
              // simple-fun header widetag.
              lispobj entry_point; /* tagged pointer to entry point */
              struct simple_fun *function_ptr; /* untagged pointer to entry point */
              for (entry_point = ((struct code*)obj)->entry_points;
                   entry_point != NIL;
                   entry_point = function_ptr->next) {
                  function_ptr = (struct simple_fun *) native_pointer(entry_point);
                  gc_assert_verbose(is_lisp_pointer(entry_point),
                                    "Code %p entry point %p is not a lisp pointer.",
                                    obj, (void*)entry_point);
                  gc_assert(widetag_of(function_ptr->header)==SIMPLE_FUN_HEADER_WIDETAG);
              }
          }
      }
      // The recorded per-page generation mask must agree exactly with the
      // union just computed from the object headers on the page.
      gc_assert(mask == VARYOBJ_PAGE_GENS(page));
  }
}
2026 #endif
2027