1 /* Copyright (C) 2001-2006 Artifex Software, Inc.
2    All Rights Reserved.
3 
4    This software is provided AS-IS with no warranty, either express or
5    implied.
6 
7    This software is distributed under license and may not be copied, modified
8    or distributed except as expressly authorized under the terms of that
9    license.  Refer to licensing information at http://www.artifex.com/
10    or contact Artifex Software, Inc.,  7 Mt. Lassen Drive - Suite A-134,
11    San Rafael, CA  94903, U.S.A., +1(415)492-9861, for further information.
12 */
13 
14 /* $Id: isave.c 9043 2008-08-28 22:48:19Z giles $ */
15 /* Save/restore manager for Ghostscript interpreter */
16 #include "ghost.h"
17 #include "memory_.h"
18 #include "ierrors.h"
19 #include "gsexit.h"
20 #include "gsstruct.h"
21 #include "stream.h"		/* for linking for forgetsave */
22 #include "iastate.h"
23 #include "inamedef.h"
24 #include "iname.h"
25 #include "ipacked.h"
26 #include "isave.h"
27 #include "isstate.h"
28 #include "store.h"		/* for ref_assign */
29 #include "ivmspace.h"
30 #include "igc.h"
31 #include "gsutil.h"		/* gs_next_ids prototype */
32 
/* Structure descriptor for alloc_save_t (macro from isstate.h). */
private_st_alloc_save();

/* Define the maximum amount of data we are willing to scan repeatedly -- */
/* see below for details. */
static const long max_repeated_scan = 100000;

/* Define the minimum space for creating an inner chunk. */
/* Must be at least sizeof(chunk_head_t). */
static const long min_inner_chunk_space = sizeof(chunk_head_t) + 500;
43 
44 /*
45  * The logic for saving and restoring the state is complex.
46  * Both the changes to individual objects, and the overall state
47  * of the memory manager, must be saved and restored.
48  */
49 
50 /*
51  * To save the state of the memory manager:
52  *      Save the state of the current chunk in which we are allocating.
53  *      Shrink all chunks to their inner unallocated region.
54  *      Save and reset the free block chains.
55  * By doing this, we guarantee that no object older than the save
56  * can be freed.
57  *
58  * To restore the state of the memory manager:
59  *      Free all chunks newer than the save, and the descriptors for
60  *        the inner chunks created by the save.
61  *      Make current the chunk that was current at the time of the save.
62  *      Restore the state of the current chunk.
63  *
64  * In addition to save ("start transaction") and restore ("abort transaction"),
 * we support forgetting a save ("commit transaction").  To forget a save:
66  *      Reassign to the next outer save all chunks newer than the save.
 *      Free the descriptors for the inner chunks, updating their outer
68  *        chunks to reflect additional allocations in the inner chunks.
69  *      Concatenate the free block chains with those of the outer save.
70  */
71 
72 /*
73  * For saving changes to individual objects, we add an "attribute" bit
74  * (l_new) that logically belongs to the slot where the ref is stored,
75  * not to the ref itself.  The bit means "the contents of this slot
76  * have been changed, or the slot was allocated, since the last save."
77  * To keep track of changes since the save, we associate a chain of
78  * <slot, old_contents> pairs that remembers the old contents of slots.
79  *
80  * When creating an object, if the save level is non-zero:
81  *      Set l_new in all slots.
82  *
83  * When storing into a slot, if the save level is non-zero:
84  *      If l_new isn't set, save the address and contents of the slot
85  *        on the current contents chain.
86  *      Set l_new after storing the new value.
87  *
88  * To do a save:
89  *      If the save level is non-zero:
90  *              Reset l_new in all slots on the contents chain, and in all
91  *                objects created since the previous save.
92  *      Push the head of the contents chain, and reset the chain to empty.
93  *
94  * To do a restore:
95  *      Check all the stacks to make sure they don't contain references
96  *        to objects created since the save.
97  *      Restore all the slots on the contents chain.
98  *      Pop the contents chain head.
99  *      If the save level is now non-zero:
100  *              Scan the newly restored contents chain, and set l_new in all
101  *                the slots it references.
102  *              Scan all objects created since the previous save, and set
103  *                l_new in all the slots of each object.
104  *
105  * To forget a save:
106  *      If the save level is greater than 1:
107  *              Set l_new as for a restore, per the next outer save.
108  *              Concatenate the next outer contents chain to the end of
109  *                the current one.
110  *      If the save level is 1:
111  *              Reset l_new as for a save.
112  *              Free the contents chain.
113  */
114 
115 /*
116  * A consequence of the foregoing algorithms is that the cost of a save is
117  * proportional to the total amount of data allocated since the previous
118  * save.  If a PostScript program reads in a large amount of setup code and
119  * then uses save/restore heavily, each save/restore will be expensive.  To
120  * mitigate this, we check to see how much data we have scanned at this save
121  * level: if it is large, we do a second, invisible save.  This greatly
122  * reduces the cost of inner saves, at the expense of possibly saving some
123  * changes twice that otherwise would only have to be saved once.
124  */
125 
126 /*
127  * The presence of global and local VM complicates the situation further.
128  * There is a separate save chain and contents chain for each VM space.
129  * When multiple contexts are fully implemented, save and restore will have
130  * the following effects, according to the privacy status of the current
131  * context's global and local VM:
132  *      Private global, private local:
133  *              The outermost save saves both global and local VM;
134  *                otherwise, save only saves local VM.
135  *      Shared global, private local:
136  *              Save only saves local VM.
137  *      Shared global, shared local:
138  *              Save only saves local VM, and suspends all other contexts
139  *                sharing the same local VM until the matching restore.
140  * Since we do not currently implement multiple contexts, only the first
141  * case is relevant.
142  *
143  * Note that when saving the contents of a slot, the choice of chain
144  * is determined by the VM space in which the slot is allocated,
145  * not by the current allocation mode.
146  */
147 
/* Tracing printout */
/* Emit one debug-trace line (under the 'u' flag) for a save operation:
   the operation name, VM space number, save object address, client data
   pointer, and save id. */
static void
print_save(const char *str, uint spacen, const alloc_save_t *sav)
{
  if_debug5('u', "[u]%s space %u 0x%lx: cdata = 0x%lx, id = %lu\n",\
	    str, spacen, (ulong)sav, (ulong)sav->client_data, (ulong)sav->id);
}
155 
156 /* A link to igcref.c . */
157 ptr_proc_reloc(igc_reloc_ref_ptr_nocheck, ref_packed);
158 
159 /*
160  * Structure for saved change chain for save/restore.  Because of the
161  * garbage collector, we need to distinguish the cases where the change
162  * is in a static object, a dynamic ref, or a dynamic struct.
163  */
164 typedef struct alloc_change_s alloc_change_t;
165 struct alloc_change_s {
166     alloc_change_t *next;
167     ref_packed *where;
168     ref contents;
169 #define AC_OFFSET_STATIC (-2)	/* static object */
170 #define AC_OFFSET_REF (-1)	/* dynamic ref */
171 #define AC_OFFSET_ALLOCATED (-3) /* a newly allocated ref array */
172     short offset;		/* if >= 0, offset within struct */
173 };
174 
/* GC procedure: clear the mark on the saved contents of a change record. */
static
CLEAR_MARKS_PROC(change_clear_marks)
{
    alloc_change_t *const ptr = (alloc_change_t *)vptr;

    /* The saved contents may be either a packed ref or a full ref;
       each form keeps its mark bit in a different place. */
    if (r_is_packed(&ptr->contents))
	r_clear_pmark((ref_packed *) & ptr->contents);
    else
	r_clear_attrs(&ptr->contents, l_mark);
}
/* GC procedure: enumerate the pointers in a change record. */
static
ENUM_PTRS_WITH(change_enum_ptrs, alloc_change_t *ptr) return 0;
ENUM_PTR(0, alloc_change_t, next);
case 1:
    if (ptr->offset >= 0)
	/* 'where' points into a struct: report the struct's base address. */
	ENUM_RETURN((byte *) ptr->where - ptr->offset);
    else
	if (ptr->offset != AC_OFFSET_ALLOCATED)
	    ENUM_RETURN_REF(ptr->where);
	else {
	    /* Don't enumerate ptr->where, because it
	       needs a special processing with
	       alloc_save__filter_changes. */
    	    ENUM_RETURN(0);
	}
case 2:
    ENUM_RETURN_REF(&ptr->contents);
ENUM_PTRS_END
/* GC procedure: relocate the pointers in a change record after compaction. */
static RELOC_PTRS_WITH(change_reloc_ptrs, alloc_change_t *ptr)
{
    RELOC_VAR(ptr->next);
    switch (ptr->offset) {
	case AC_OFFSET_STATIC:
	    break;		/* static objects are not relocated */
	case AC_OFFSET_REF:
	    RELOC_REF_PTR_VAR(ptr->where);
	    break;
	case AC_OFFSET_ALLOCATED:
	    /* We know that ptr->where may point to an unmarked object
	       because change_enum_ptrs skipped it,
	       and we know it always points to same space
	       because we took a special care when calling alloc_save_change_alloc.
	       Therefore we must skip the check for the mark,
	       which would happen if we call the regular relocation function
	       igc_reloc_ref_ptr from RELOC_REF_PTR_VAR.
	       Calling igc_reloc_ref_ptr_nocheck instead. */
	    {	/* A sanity check: deliberately write through a null pointer
		   (crash) if the object header is not a ref array. */
		obj_header_t *pre = (obj_header_t *)ptr->where - 1, *pre1 = 0;

		if (pre->o_type != &st_refs)
		    pre1->o_type = 0; /* issue a segfault. */
	    }
	    if (ptr->where != 0 && !gcst->relocating_untraced)
		ptr->where = igc_reloc_ref_ptr_nocheck(ptr->where, gcst);
	    break;
	default:
	    {	/* offset >= 0: relocate the containing struct, then
		   re-derive the slot address from the saved offset. */
		byte *obj = (byte *) ptr->where - ptr->offset;

		RELOC_VAR(obj);
		ptr->where = (ref_packed *) (obj + ptr->offset);
	    }
	    break;
    }
    /* Clear the mark on the saved contents as well (cf. change_clear_marks). */
    if (r_is_packed(&ptr->contents))
	r_clear_pmark((ref_packed *) & ptr->contents);
    else {
	RELOC_REF_VAR(ptr->contents);
	r_clear_attrs(&ptr->contents, l_mark);
    }
}
RELOC_PTRS_END
gs_private_st_complex_only(st_alloc_change, alloc_change_t, "alloc_change",
		change_clear_marks, change_enum_ptrs, change_reloc_ptrs, 0);
249 
/* Debugging printout */
#ifdef DEBUG
/* Print one change record: its address, the slot address, and the saved
   contents (and, if print_current, the slot's current contents too),
   in packed- or full-ref form as appropriate. */
static void
alloc_save_print(alloc_change_t * cp, bool print_current)
{
    dprintf2(" 0x%lx: 0x%lx: ", (ulong) cp, (ulong) cp->where);
    if (r_is_packed(&cp->contents)) {
	if (print_current)
	    dprintf2("saved=%x cur=%x\n", *(ref_packed *) & cp->contents,
		     *cp->where);
	else
	    dprintf1("%x\n", *(ref_packed *) & cp->contents);
    } else {
	if (print_current)
	    dprintf6("saved=%x %x %lx cur=%x %x %lx\n",
		     r_type_attrs(&cp->contents), r_size(&cp->contents),
		     (ulong) cp->contents.value.intval,
		     r_type_attrs((ref *) cp->where),
		     r_size((ref *) cp->where),
		     (ulong) ((ref *) cp->where)->value.intval);
	else
	    dprintf3("%x %x %lx\n",
		     r_type_attrs(&cp->contents), r_size(&cp->contents),
		     (ulong) cp->contents.value.intval);
    }
}
#endif
277 
278 /* Forward references */
279 static int  restore_resources(alloc_save_t *, gs_ref_memory_t *);
280 static void restore_free(gs_ref_memory_t *);
281 static int  save_set_new(gs_ref_memory_t * mem, bool to_new, bool set_limit, ulong *pscanned);
282 static int  save_set_new_changes(gs_ref_memory_t *, bool, bool);
283 static bool check_l_mark(void *obj);
284 
/* Initialize the save/restore machinery. */
void
alloc_save_init(gs_dual_memory_t * dmem)
{
    /* Start outside any save: clear the new/test masks in all spaces. */
    alloc_set_not_in_save(dmem);
}
291 
292 /* Record that we are in a save. */
293 static void
alloc_set_masks(gs_dual_memory_t * dmem,uint new_mask,uint test_mask)294 alloc_set_masks(gs_dual_memory_t *dmem, uint new_mask, uint test_mask)
295 {
296     int i;
297     gs_ref_memory_t *mem;
298 
299     dmem->new_mask = new_mask;
300     dmem->test_mask = test_mask;
301     for (i = 0; i < countof(dmem->spaces.memories.indexed); ++i)
302 	if ((mem = dmem->spaces.memories.indexed[i]) != 0) {
303 	    mem->new_mask = new_mask, mem->test_mask = test_mask;
304 	    if (mem->stable_memory != (gs_memory_t *)mem) {
305 		mem = (gs_ref_memory_t *)mem->stable_memory;
306 		mem->new_mask = new_mask, mem->test_mask = test_mask;
307 	    }
308 	}
309 }
void
alloc_set_in_save(gs_dual_memory_t *dmem)
{
    /* While inside a save, newly allocated/stored slots carry l_new. */
    alloc_set_masks(dmem, l_new, l_new);
}
315 
/* Record that we are not in a save. */
void
alloc_set_not_in_save(gs_dual_memory_t *dmem)
{
    /* new_mask == 0 disables change recording (see alloc_save_change_in);
       test_mask = ~0 presumably makes every slot test as already saved --
       confirm against the store macros. */
    alloc_set_masks(dmem, 0, ~0);
}
322 
323 /* Save the state. */
324 static alloc_save_t *alloc_save_space(gs_ref_memory_t *mem,
325 				       gs_dual_memory_t *dmem,
326 				       ulong sid);
/* Free a save object from a failed save, together with the inner chunk
   structures that were created for it. */
static void
alloc_free_save(gs_ref_memory_t *mem, alloc_save_t *save, const char *scn)
{
    gs_free_object((gs_memory_t *)mem, save, scn);
    /* Free any inner chunk structures.  This is the easiest way to do it. */
    restore_free(mem);
}
334 int
alloc_save_state(gs_dual_memory_t * dmem,void * cdata,ulong * psid)335 alloc_save_state(gs_dual_memory_t * dmem, void *cdata, ulong *psid)
336 {
337     gs_ref_memory_t *lmem = dmem->space_local;
338     gs_ref_memory_t *gmem = dmem->space_global;
339     ulong sid = gs_next_ids((const gs_memory_t *)lmem->stable_memory, 2);
340     bool global =
341 	lmem->save_level == 0 && gmem != lmem &&
342 	gmem->num_contexts == 1;
343     alloc_save_t *gsave =
344 	(global ? alloc_save_space(gmem, dmem, sid + 1) : (alloc_save_t *) 0);
345     alloc_save_t *lsave = alloc_save_space(lmem, dmem, sid);
346 
347     if (lsave == 0 || (global && gsave == 0)) {
348 	if (lsave != 0)
349 	    alloc_free_save(lmem, lsave, "alloc_save_state(local save)");
350 	if (gsave != 0)
351 	    alloc_free_save(gmem, gsave, "alloc_save_state(global save)");
352 	return 0;
353     }
354     if (gsave != 0) {
355 	gsave->client_data = 0;
356 	print_save("save", gmem->space, gsave);
357 	/* Restore names when we do the local restore. */
358 	lsave->restore_names = gsave->restore_names;
359 	gsave->restore_names = false;
360     }
361     lsave->id = sid;
362     lsave->client_data = cdata;
363     print_save("save", lmem->space, lsave);
364     /* Reset the l_new attribute in all slots.  The only slots that */
365     /* can have the attribute set are the ones on the changes chain, */
366     /* and ones in objects allocated since the last save. */
367     if (lmem->save_level > 1) {
368 	ulong scanned;
369 	int code = save_set_new(&lsave->state, false, true, &scanned);
370 
371 	if (code < 0)
372 	    return code;
373 #if 0 /* Disable invisible save levels. */
374 	if ((lsave->state.total_scanned += scanned) > max_repeated_scan) {
375 	    /* Do a second, invisible save. */
376 	    alloc_save_t *rsave;
377 
378 	    rsave = alloc_save_space(lmem, dmem, 0L);
379 	    if (rsave != 0) {
380 		rsave->client_data = cdata;
381 #if 0 /* Bug 688153 */
382 		rsave->id = lsave->id;
383 		print_save("save", lmem->space, rsave);
384 		lsave->id = 0;	/* mark as invisible */
385 		rsave->state.save_level--; /* ditto */
386 		lsave->client_data = 0;
387 #else
388 		rsave->id = 0;  /* mark as invisible */
389 		print_save("save", lmem->space, rsave);
390 		rsave->state.save_level--; /* ditto */
391 		rsave->client_data = 0;
392 #endif
393 		/* Inherit the allocated space count -- */
394 		/* we need this for triggering a GC. */
395 		print_save("save", lmem->space, lsave);
396 	    }
397 	}
398 #endif
399     }
400     alloc_set_in_save(dmem);
401     *psid = sid;
402     return 0;
403 }
/* Save the state of one space (global or local). */
/* Captures the allocator state by value into a new alloc_save_t, creates
   inner chunks covering the unallocated tails of existing chunks, and
   resets the allocator for post-save allocation.  sid == 0 marks an
   invisible save, which does not raise the save level.  Returns 0 on
   allocation failure (with the allocator state restored). */
static alloc_save_t *
alloc_save_space(gs_ref_memory_t * mem, gs_dual_memory_t * dmem, ulong sid)
{
    gs_ref_memory_t save_mem;
    alloc_save_t *save;
    chunk_t *cp;
    chunk_t *new_pcc = 0;

    /* Snapshot the entire allocator state by value, then reset it. */
    save_mem = *mem;
    alloc_close_chunk(mem);
    mem->pcc = 0;
    gs_memory_status((gs_memory_t *) mem, &mem->previous_status);
    ialloc_reset(mem);

    /* Create inner chunks wherever it's worthwhile. */

    for (cp = save_mem.cfirst; cp != 0; cp = cp->cnext) {
	if (cp->ctop - cp->cbot > min_inner_chunk_space) {
	    /* Create an inner chunk to cover only the unallocated part. */
	    chunk_t *inner =
		gs_raw_alloc_struct_immovable(mem->non_gc_memory, &st_chunk,
					      "alloc_save_space(inner)");

	    if (inner == 0)
		break;		/* maybe should fail */
	    alloc_init_chunk(inner, cp->cbot, cp->ctop, cp->sreloc != 0, cp);
	    alloc_link_chunk(inner, mem);
	    if_debug2('u', "[u]inner chunk: cbot=0x%lx ctop=0x%lx\n",
		      (ulong) inner->cbot, (ulong) inner->ctop);
	    /* Keep the current-chunk property across the save. */
	    if (cp == save_mem.pcc)
		new_pcc = inner;
	}
    }
    mem->pcc = new_pcc;
    alloc_open_chunk(mem);

    save = gs_alloc_struct((gs_memory_t *) mem, alloc_save_t,
			   &st_alloc_save, "alloc_save_space(save)");
    if_debug2('u', "[u]save space %u at 0x%lx\n",
	      mem->space, (ulong) save);
    if (save == 0) {
	/* Free the inner chunk structures.  This is the easiest way. */
	restore_free(mem);
	*mem = save_mem;
	return 0;
    }
    save->state = save_mem;
    save->spaces = dmem->spaces;
    save->restore_names = (name_memory(mem) == (gs_memory_t *) mem);
    save->is_current = (dmem->current == mem);
    save->id = sid;
    mem->saved = save;
    if_debug2('u', "[u%u]file_save 0x%lx\n",
	      mem->space, (ulong) mem->streams);
    /* Streams and scan counters start fresh at the new level. */
    mem->streams = 0;
    mem->total_scanned = 0;
    mem->total_scanned_after_compacting = 0;
    if (sid)
	mem->save_level++;
    return save;
}
466 
467 /* Record a state change that must be undone for restore, */
468 /* and mark it as having been saved. */
469 int
alloc_save_change_in(gs_ref_memory_t * mem,const ref * pcont,ref_packed * where,client_name_t cname)470 alloc_save_change_in(gs_ref_memory_t *mem, const ref * pcont,
471 		  ref_packed * where, client_name_t cname)
472 {
473     register alloc_change_t *cp;
474 
475     if (mem->new_mask == 0)
476 	return 0;		/* no saving */
477     cp = gs_alloc_struct((gs_memory_t *)mem, alloc_change_t,
478 			 &st_alloc_change, "alloc_save_change");
479     if (cp == 0)
480 	return -1;
481     cp->next = mem->changes;
482     cp->where = where;
483     if (pcont == NULL)
484 	cp->offset = AC_OFFSET_STATIC;
485     else if (r_is_array(pcont) || r_has_type(pcont, t_dictionary))
486 	cp->offset = AC_OFFSET_REF;
487     else if (r_is_struct(pcont))
488 	cp->offset = (byte *) where - (byte *) pcont->value.pstruct;
489     else {
490 	lprintf3("Bad type %u for save!  pcont = 0x%lx, where = 0x%lx\n",
491 		 r_type(pcont), (ulong) pcont, (ulong) where);
492 	gs_abort((const gs_memory_t *)mem);
493     }
494     if (r_is_packed(where))
495 	*(ref_packed *)&cp->contents = *where;
496     else {
497 	ref_assign_inline(&cp->contents, (ref *) where);
498 	r_set_attrs((ref *) where, l_new);
499     }
500     mem->changes = cp;
501 #ifdef DEBUG
502     if (gs_debug_c('U')) {
503 	dlprintf1("[U]save(%s)", client_name_string(cname));
504 	alloc_save_print(cp, false);
505     }
506 #endif
507     return 0;
508 }
509 int
alloc_save_change(gs_dual_memory_t * dmem,const ref * pcont,ref_packed * where,client_name_t cname)510 alloc_save_change(gs_dual_memory_t * dmem, const ref * pcont,
511 		  ref_packed * where, client_name_t cname)
512 {
513     gs_ref_memory_t *mem =
514 	(pcont == NULL ? dmem->space_local :
515 	 dmem->spaces_indexed[r_space(pcont) >> r_space_shift]);
516 
517     return alloc_save_change_in(mem, pcont, where, cname);
518 }
519 
/* Allocate a structure for recording an allocation event. */
/* Returns 0 if no save is active, 1 on success with *ppr pointing at the
   record's 'where' slot (to be filled in by the caller with the address
   of the new ref array), or e_VMerror on allocation failure. */
int
alloc_save_change_alloc(gs_ref_memory_t *mem, client_name_t cname, ref_packed ***ppr)
{
    register alloc_change_t *cp;

    if (mem->new_mask == 0)
	return 0;		/* no saving */
    cp = gs_alloc_struct((gs_memory_t *)mem, alloc_change_t,
			 &st_alloc_change, "alloc_save_change");
    if (cp == 0)
	return_error(e_VMerror);
    cp->next = mem->changes;
    cp->where = 0;		/* filled in by the caller through *ppr */
    cp->offset = AC_OFFSET_ALLOCATED;
    make_null(&cp->contents);
    mem->changes = cp;
    *ppr = &cp->where;
    return 1;
}
540 
541 /* Remove an AC_OFFSET_ALLOCATED element. */
542 void
alloc_save_remove(gs_ref_memory_t * mem,ref_packed * obj,client_name_t cname)543 alloc_save_remove(gs_ref_memory_t *mem, ref_packed *obj, client_name_t cname)
544 {
545     alloc_change_t **cpp = &mem->changes;
546 
547     for (; *cpp != NULL;) {
548 	alloc_change_t *cp = *cpp;
549 
550 	if (cp->offset == AC_OFFSET_ALLOCATED && cp->where == obj) {
551 	    if (mem->scan_limit == cp)
552 		mem->scan_limit = cp->next;
553 	    *cpp = cp->next;
554 	    gs_free_object((gs_memory_t *)mem, cp, "alloc_save_remove");
555 	} else
556 	    cpp = &(*cpp)->next;
557     }
558 }
559 
/* Filter save change lists. */
static inline void
alloc_save__filter_changes_in_space(gs_ref_memory_t *mem)
{
    /* This is a special function, which is called
       from the garbager after setting marks and before collecting
       unused space. Therefore it just resets marks for
       elements being released instead releasing them really. */
    alloc_change_t **cpp = &mem->changes;

    for (; *cpp != NULL; ) {
	alloc_change_t *cp = *cpp;

	if (cp->offset == AC_OFFSET_ALLOCATED && !check_l_mark(cp->where)) {
	    /* The recorded allocation is unmarked (dead): unlink the
	       change record and clear the mark on the record's own
	       object header so the garbager reclaims it too. */
	    obj_header_t *pre = (obj_header_t *)cp - 1;

	    *cpp = cp->next;
	    cp->where = 0;
	    if (mem->scan_limit == cp)
		mem->scan_limit = cp->next;
	    o_set_unmarked(pre);
	} else
	    cpp = &(*cpp)->next;
    }
}
585 
586 /* Filter save change lists. */
587 void
alloc_save__filter_changes(gs_ref_memory_t * memory)588 alloc_save__filter_changes(gs_ref_memory_t *memory)
589 {
590     gs_ref_memory_t *mem = memory;
591 
592     for  (; mem; mem = &mem->saved->state)
593 	alloc_save__filter_changes_in_space(mem);
594 }
595 
596 /* Return (the id of) the innermost externally visible save object, */
597 /* i.e., the innermost save with a non-zero ID. */
598 ulong
alloc_save_current_id(const gs_dual_memory_t * dmem)599 alloc_save_current_id(const gs_dual_memory_t * dmem)
600 {
601     const alloc_save_t *save = dmem->space_local->saved;
602 
603     while (save != 0 && save->id == 0)
604 	save = save->state.saved;
605     return save->id;
606 }
/* Return the innermost externally visible save object itself. */
alloc_save_t *
alloc_save_current(const gs_dual_memory_t * dmem)
{
    return alloc_find_save(dmem, alloc_save_current_id(dmem));
}
612 
/* Test whether a reference would be invalidated by a restore. */
bool
alloc_is_since_save(const void *vptr, const alloc_save_t * save)
{
    /* A reference postdates a save iff it is in a chunk allocated */
    /* since the save (including any carried-over inner chunks). */

    const char *const ptr = (const char *)vptr;
    register const gs_ref_memory_t *mem = save->space_local;

    if_debug2('U', "[U]is_since_save 0x%lx, 0x%lx:\n",
	      (ulong) ptr, (ulong) save);
    if (mem->saved == 0) {	/* This is a special case, the final 'restore' from */
	/* alloc_restore_all. */
	return true;
    }
    /* Check against chunks allocated since the save. */
    /* (There may have been intermediate saves as well.) */
    for (;; mem = &mem->saved->state) {
	const chunk_t *cp;

	if_debug1('U', "[U]checking mem=0x%lx\n", (ulong) mem);
	for (cp = mem->cfirst; cp != 0; cp = cp->cnext) {
	    if (ptr_is_within_chunk(ptr, cp)) {
		if_debug3('U', "[U+]in new chunk 0x%lx: 0x%lx, 0x%lx\n",
			  (ulong) cp,
			  (ulong) cp->cbase, (ulong) cp->cend);
		return true;
	    }
	    if_debug1('U', "[U-]not in 0x%lx\n", (ulong) cp);
	}
	if (mem->saved == save) {	/* We've checked all the more recent saves, */
	    /* must be OK. */
	    break;
	}
    }

    /*
     * If we're about to do a global restore (a restore to the level 0),
     * and there is only one context using this global VM
     * (the normal case, in which global VM is saved by the
     * outermost save), we also have to check the global save.
     * Global saves can't be nested, which makes things easy.
     */
    if (save->state.save_level == 0 /* Restoring to save level 0 - see bug 688157, 688161 */ &&
	(mem = save->space_global) != save->space_local &&
	save->space_global->num_contexts == 1
	) {
	const chunk_t *cp;

	if_debug1('U', "[U]checking global mem=0x%lx\n", (ulong) mem);
	for (cp = mem->cfirst; cp != 0; cp = cp->cnext)
	    if (ptr_is_within_chunk(ptr, cp)) {
		if_debug3('U', "[U+]  new chunk 0x%lx: 0x%lx, 0x%lx\n",
			  (ulong) cp, (ulong) cp->cbase, (ulong) cp->cend);
		return true;
	    }
    }
    return false;

/* NOTE(review): no matching #define for 'ptr' is visible in this file;
   this #undef appears to be a harmless leftover. */
#undef ptr
}
675 
676 /* Test whether a name would be invalidated by a restore. */
677 bool
alloc_name_is_since_save(const gs_memory_t * mem,const ref * pnref,const alloc_save_t * save)678 alloc_name_is_since_save(const gs_memory_t *mem,
679 			 const ref * pnref, const alloc_save_t * save)
680 {
681     const name_string_t *pnstr;
682 
683     if (!save->restore_names)
684 	return false;
685     pnstr = names_string_inline(mem->gs_lib_ctx->gs_name_table, pnref);
686     if (pnstr->foreign_string)
687 	return false;
688     return alloc_is_since_save(pnstr->string_bytes, save);
689 }
690 bool
alloc_name_index_is_since_save(const gs_memory_t * mem,uint nidx,const alloc_save_t * save)691 alloc_name_index_is_since_save(const gs_memory_t *mem,
692 			       uint nidx, const alloc_save_t *save)
693 {
694     const name_string_t *pnstr;
695 
696     if (!save->restore_names)
697 	return false;
698     pnstr = names_index_string_inline(mem->gs_lib_ctx->gs_name_table, nidx);
699     if (pnstr->foreign_string)
700 	return false;
701     return alloc_is_since_save(pnstr->string_bytes, save);
702 }
703 
/* Check whether any names have been created since a given save */
/* that might be released by the restore. */
bool
alloc_any_names_since_save(const alloc_save_t * save)
{
    /* True iff this save captured the name table (see alloc_save_space). */
    return save->restore_names;
}
711 
712 /* Get the saved state with a given ID. */
713 alloc_save_t *
alloc_find_save(const gs_dual_memory_t * dmem,ulong sid)714 alloc_find_save(const gs_dual_memory_t * dmem, ulong sid)
715 {
716     alloc_save_t *sprev = dmem->space_local->saved;
717 
718     if (sid == 0)
719 	return 0;		/* invalid id */
720     while (sprev != 0) {
721 	if (sprev->id == sid)
722 	    return sprev;
723 	sprev = sprev->state.saved;
724     }
725     return 0;
726 }
727 
/* Get the client data from a saved state. */
void *
alloc_save_client_data(const alloc_save_t * save)
{
    return save->client_data;
}
734 
735 /*
736  * Do one step of restoring the state.  The client is responsible for
737  * calling alloc_find_save to get the save object, and for ensuring that
738  * there are no surviving pointers for which alloc_is_since_save is true.
739  * Return true if the argument was the innermost save, in which case
740  * this is the last (or only) step.
741  * Note that "one step" may involve multiple internal steps,
742  * if this is the outermost restore (which requires restoring both local
743  * and global VM) or if we created extra save levels to reduce scanning.
744  */
745 static void restore_finalize(gs_ref_memory_t *);
746 static void restore_space(gs_ref_memory_t *, gs_dual_memory_t *);
747 
/* Do one externally visible step of restoring the state; see the block
   comment above for the full contract.  The two passes below are
   deliberately separate: all finalization happens before any resources
   are released or memory freed. */
int
alloc_restore_step_in(gs_dual_memory_t *dmem, alloc_save_t * save)
{
    /* Get save->space_* now, because the save object will be freed. */
    gs_ref_memory_t *lmem = save->space_local;
    gs_ref_memory_t *gmem = save->space_global;
    gs_ref_memory_t *mem = lmem;
    alloc_save_t *sprev;
    int code;

    /* Finalize all objects before releasing resources or undoing changes. */
    do {
	ulong sid;

	sprev = mem->saved;
	sid = sprev->id;
	restore_finalize(mem);	/* finalize objects */
	mem = &sprev->state;
	/* sid != 0 means we reached a visible save level; invisible
	   (id == 0) levels are consumed in the same step. */
	if (sid != 0)
	    break;
    }
    while (sprev != save);
    if (mem->save_level == 0) {
	/* This is the outermost save, which might also */
	/* need to restore global VM. */
	mem = gmem;
	if (mem != lmem && mem->saved != 0)
	    restore_finalize(mem);
    }

    /* Do one (externally visible) step of restoring the state. */
    mem = lmem;
    do {
	ulong sid;

	sprev = mem->saved;
	sid = sprev->id;
	code = restore_resources(sprev, mem);	/* release other resources */
	if (code < 0)
	    return code;
	restore_space(mem, dmem);	/* release memory */
	if (sid != 0)
	    break;
    }
    while (sprev != save);

    if (mem->save_level == 0) {
	/* This is the outermost save, which might also */
	/* need to restore global VM. */
	mem = gmem;
	if (mem != lmem && mem->saved != 0) {
	    code = restore_resources(mem->saved, mem);
	    if (code < 0)
		return code;
	    restore_space(mem, dmem);
	}
	alloc_set_not_in_save(dmem);
    } else {			/* Set the l_new attribute in all slots that are now new. */
	ulong scanned;

	code = save_set_new(mem, true, false, &scanned);
	if (code < 0)
	    return code;
    }

    /* Return true iff this was the innermost save. */
    return sprev == save;
}
/* Restore the memory of one space, by undoing changes and freeing */
/* memory allocated since the save. */
static void
restore_space(gs_ref_memory_t * mem, gs_dual_memory_t *dmem)
{
    alloc_save_t *save = mem->saved;
    alloc_save_t saved;

    print_save("restore", mem->space, save);

    /* Undo changes since the save. */
    {
	register alloc_change_t *cp = mem->changes;

	while (cp) {
#ifdef DEBUG
	    if (gs_debug_c('U')) {
		dlputs("[U]restore");
		alloc_save_print(cp, true);
	    }
#endif
	    /* Allocation records have no old contents to put back. */
	    if (cp->offset == AC_OFFSET_ALLOCATED)
		DO_NOTHING;
	    else
	    if (r_is_packed(&cp->contents))
		*cp->where = *(ref_packed *) & cp->contents;
	    else
		ref_assign_inline((ref *) cp->where, &cp->contents);
	    cp = cp->next;
	}
    }

    /* Free memory allocated since the save. */
    /* Note that this frees all chunks except the inner ones */
    /* belonging to this level. */
    /* Copy the save record by value first: it lives in the memory
       being freed. */
    saved = *save;
    restore_free(mem);

    /* Restore the allocator state. */
    {
	int num_contexts = mem->num_contexts;	/* don't restore */

	*mem = saved.state;
	mem->num_contexts = num_contexts;
    }
    alloc_open_chunk(mem);

    /* Make the allocator current if it was current before the save. */
    if (saved.is_current) {
	dmem->current = mem;
	dmem->current_space = mem->space;
    }
}
868 
/* Restore to the initial state, releasing all resources. */
/* The allocator is no longer usable after calling this routine! */
int
alloc_restore_all(gs_dual_memory_t * dmem)
{
    /*
     * Save the memory pointers, since freeing space_local will also
     * free dmem itself.
     */
    gs_ref_memory_t *lmem = dmem->space_local;
    gs_ref_memory_t *gmem = dmem->space_global;
    gs_ref_memory_t *smem = dmem->space_system;
    gs_ref_memory_t *mem;
    int code;

    /* Restore to a state outside any saves. */
    while (lmem->save_level != 0) {
	code = alloc_restore_step_in(dmem, lmem->saved);
	if (code < 0)
	    return code;
    }

    /* Finalize memory. */
    /* Each space's stable (non-GC) allocator is finalized separately, */
    /* but only when it is a distinct object from the space itself. */
    restore_finalize(lmem);
    if ((mem = (gs_ref_memory_t *)lmem->stable_memory) != lmem)
	restore_finalize(mem);
    /* Global VM is shared between contexts: only finalize it when this */
    /* is the last context using it. */
    if (gmem != lmem && gmem->num_contexts == 1) {
	restore_finalize(gmem);
	if ((mem = (gs_ref_memory_t *)gmem->stable_memory) != gmem)
	    restore_finalize(mem);
    }
    restore_finalize(smem);

    /* Release resources other than memory, using fake */
    /* save and memory objects. */
    {
	alloc_save_t empty_save;

	empty_save.spaces = dmem->spaces;
	empty_save.restore_names = false;	/* don't bother to release */
	code = restore_resources(&empty_save, NULL);
	if (code < 0)
	    return code;
    }

    /* Finally, release memory. */
    /* NOTE: dmem may be freed as a side effect of freeing lmem, so it */
    /* must not be touched after this point. */
    restore_free(lmem);
    if ((mem = (gs_ref_memory_t *)lmem->stable_memory) != lmem)
	restore_free(mem);
    if (gmem != lmem) {
	/* Drop this context's reference; free global VM on the last one. */
	if (!--(gmem->num_contexts)) {
	    restore_free(gmem);
	    if ((mem = (gs_ref_memory_t *)gmem->stable_memory) != gmem)
		restore_free(mem);
	}
    }
    restore_free(smem);
    return 0;
}
928 
/*
 * Finalize objects that will be freed by a restore.
 * Note that we must temporarily disable the freeing operations
 * of the allocator while doing this.
 */
static void
restore_finalize(gs_ref_memory_t * mem)
{
    chunk_t *cp;

    alloc_close_chunk(mem);
    /* Disable freeing so that finalize procedures cannot mutate the */
    /* chunk lists we are in the middle of scanning. */
    gs_enable_free((gs_memory_t *) mem, false);
    /* Walk chunks newest-first, invoking each object's type finalizer */
    /* (if any) on the object contents (pre + 1 skips the header). */
    for (cp = mem->clast; cp != 0; cp = cp->cprev) {
	SCAN_CHUNK_OBJECTS(cp)
	    DO_ALL
	    struct_proc_finalize((*finalize)) =
	    pre->o_type->finalize;
	if (finalize != 0) {
	    if_debug2('u', "[u]restore finalizing %s 0x%lx\n",
		      struct_type_name_string(pre->o_type),
		      (ulong) (pre + 1));
	    (*finalize) (pre + 1);
	}
	END_OBJECTS_SCAN
    }
    gs_enable_free((gs_memory_t *) mem, true);
}
956 
/* Release resources for a restore. */
/* mem may be NULL (see alloc_restore_all, which passes a fake save with */
/* restore_names == false); in that case only the font/char caches are */
/* touched.  NOTE(review): names_restore below dereferences mem, so a */
/* caller must never set restore_names with a NULL mem — confirm. */
static int
restore_resources(alloc_save_t * sprev, gs_ref_memory_t * mem)
{
    int code;
#ifdef DEBUG
    if (mem) {
	/* Note restoring of the file list. */
	if_debug4('u', "[u%u]file_restore 0x%lx => 0x%lx for 0x%lx\n",
		  mem->space, (ulong)mem->streams,
		  (ulong)sprev->state.streams, (ulong) sprev);
    }
#endif

    /* Remove entries from font and character caches. */
    code = font_restore(sprev);
    if (code < 0)
	return code;

    /* Adjust the name table. */
    if (sprev->restore_names)
	names_restore(mem->gs_lib_ctx->gs_name_table, sprev);
    return 0;
}
981 
982 /* Release memory for a restore. */
983 static void
restore_free(gs_ref_memory_t * mem)984 restore_free(gs_ref_memory_t * mem)
985 {
986     /* Free chunks allocated since the save. */
987     gs_free_all((gs_memory_t *) mem);
988 }
989 
990 /* Forget a save, by merging this level with the next outer one. */
991 static void file_forget_save(gs_ref_memory_t *);
992 static void combine_space(gs_ref_memory_t *);
993 static void forget_changes(gs_ref_memory_t *);
/*
 * Forget a save (.forgetsave): merge the given save level, and every
 * level inside it, into the next outer level instead of undoing it.
 * Returns 0 on success or a negative error code.
 */
int
alloc_forget_save_in(gs_dual_memory_t *dmem, alloc_save_t * save)
{
    gs_ref_memory_t *mem = save->space_local;
    alloc_save_t *sprev;
    ulong scanned;
    int code;

    print_save("forget_save", mem->space, save);

    /* Iteratively combine the current level with the previous one. */
    do {
	sprev = mem->saved;
	/* An id of 0 marks an internal (invalidated) save record that */
	/* does not correspond to a user-visible save level. */
	if (sprev->id != 0)
	    mem->save_level--;
	if (mem->save_level != 0) {
	    alloc_change_t *chp = mem->changes;

	    /* Keep l_new set on the outer level's changed slots, since */
	    /* they remain "new" relative to the still-enclosing save. */
	    code = save_set_new(&sprev->state, true, false, &scanned);
	    if (code < 0)
		return code;
	    /* Concatenate the changes chains. */
	    if (chp == 0)
		mem->changes = sprev->state.changes;
	    else {
		while (chp->next != 0)
		    chp = chp->next;
		chp->next = sprev->state.changes;
	    }
	    file_forget_save(mem);
	    combine_space(mem);	/* combine memory */
	} else {
	    /* Merging into level 0: no enclosing save remains, so drop */
	    /* the changes chain entirely and clear all l_new flags. */
	    forget_changes(mem);
	    code = save_set_new(mem, false, false, &scanned);
	    if (code < 0)
		return code;
	    file_forget_save(mem);
	    combine_space(mem);	/* combine memory */
	    /* This is the outermost save, which might also */
	    /* need to combine global VM. */
	    mem = save->space_global;
	    if (mem != save->space_local && mem->saved != 0) {
		forget_changes(mem);
		code = save_set_new(mem, false, false, &scanned);
		if (code < 0)
		    return code;
		file_forget_save(mem);
		combine_space(mem);
	    }
	    alloc_set_not_in_save(dmem);
	    break;		/* must be outermost */
	}
    }
    while (sprev != save);
    return 0;
}
/* Combine the chunks of the next outer level with those of the current one, */
/* and free the bookkeeping structures. */
static void
combine_space(gs_ref_memory_t * mem)
{
    alloc_save_t *saved = mem->saved;
    gs_ref_memory_t *omem = &saved->state;
    chunk_t *cp;
    chunk_t *csucc;

    alloc_close_chunk(mem);
    for (cp = mem->cfirst; cp != 0; cp = csucc) {
	csucc = cp->cnext;	/* save before relinking */
	if (cp->outer == 0)
	    /* A free-standing chunk of this level: just move it onto the */
	    /* outer level's chunk list. */
	    alloc_link_chunk(cp, omem);
	else {
	    /* An inner chunk carved out of an outer chunk's unallocated */
	    /* region: merge it back into its outer chunk. */
	    chunk_t *outer = cp->outer;

	    outer->inner_count--;
	    /* Redirect any allocator pointers that referenced the inner */
	    /* chunk to the outer one. */
	    if (mem->pcc == cp)
		mem->pcc = outer;
	    if (mem->cfreed.cp == cp)
		mem->cfreed.cp = outer;
	    /* "Free" the header of the inner chunk, */
	    /* and any immediately preceding gap left by */
	    /* the GC having compacted the outer chunk. */
	    {
		obj_header_t *hp = (obj_header_t *) outer->cbot;

		/* Disguise the gap as an ordinary byte object so the */
		/* allocator/GC treats it as dead space. */
		hp->o_alone = 0;
		hp->o_size = (char *)(cp->chead + 1)
		    - (char *)(hp + 1);
		hp->o_type = &st_bytes;
		/* The following call is probably not safe. */
#if 0				/* **************** */
		gs_free_object((gs_memory_t *) mem,
			       hp + 1, "combine_space(header)");
#endif /* **************** */
	    }
	    /* Update the outer chunk's allocation pointers. */
	    outer->cbot = cp->cbot;
	    outer->rcur = cp->rcur;
	    outer->rtop = cp->rtop;
	    outer->ctop = cp->ctop;
	    outer->has_refs |= cp->has_refs;
	    gs_free_object(mem->non_gc_memory, cp,
			   "combine_space(inner)");
	}
    }
    /* Update relevant parts of allocator state. */
    mem->cfirst = omem->cfirst;
    mem->clast = omem->clast;
    mem->allocated += omem->allocated;
    mem->gc_allocated += omem->allocated;
    mem->lost.objects += omem->lost.objects;
    mem->lost.refs += omem->lost.refs;
    mem->lost.strings += omem->lost.strings;
    mem->saved = omem->saved;
    mem->previous_status = omem->previous_status;
    {				/* Concatenate free lists. */
	int i;

	for (i = 0; i < num_freelists; i++) {
	    obj_header_t *olist = omem->freelists[i];
	    obj_header_t *list = mem->freelists[i];

	    /* Append the outer level's list after the end of the current */
	    /* one; each free block's first word links to the next block. */
	    if (olist == 0);
	    else if (list == 0)
		mem->freelists[i] = olist;
	    else {
		while (*(obj_header_t **) list != 0)
		    list = *(obj_header_t **) list;
		*(obj_header_t **) list = olist;
	    }
	}
	if (omem->largest_free_size > mem->largest_free_size)
	    mem->largest_free_size = omem->largest_free_size;
    }
    gs_free_object((gs_memory_t *) mem, saved, "combine_space(saved)");
    alloc_open_chunk(mem);
}
1131 /* Free the changes chain for a level 0 .forgetsave, */
1132 /* resetting the l_new flag in the changed refs. */
1133 static void
forget_changes(gs_ref_memory_t * mem)1134 forget_changes(gs_ref_memory_t * mem)
1135 {
1136     register alloc_change_t *chp = mem->changes;
1137     alloc_change_t *next;
1138 
1139     for (; chp; chp = next) {
1140 	ref_packed *prp = chp->where;
1141 
1142 	if_debug1('U', "[U]forgetting change 0x%lx\n", (ulong) chp);
1143 	if (chp->offset == AC_OFFSET_ALLOCATED)
1144 	    DO_NOTHING;
1145 	else
1146 	if (!r_is_packed(prp))
1147 	    r_clear_attrs((ref *) prp, l_new);
1148 	next = chp->next;
1149 	gs_free_object((gs_memory_t *) mem, chp, "forget_changes");
1150     }
1151     mem->changes = 0;
1152 }
1153 /* Update the streams list when forgetting a save. */
1154 static void
file_forget_save(gs_ref_memory_t * mem)1155 file_forget_save(gs_ref_memory_t * mem)
1156 {
1157     const alloc_save_t *save = mem->saved;
1158     stream *streams = mem->streams;
1159     stream *saved_streams = save->state.streams;
1160 
1161     if_debug4('u', "[u%d]file_forget_save 0x%lx + 0x%lx for 0x%lx\n",
1162 	      mem->space, (ulong) streams, (ulong) saved_streams,
1163 	      (ulong) save);
1164     if (streams == 0)
1165 	mem->streams = saved_streams;
1166     else if (saved_streams != 0) {
1167 	while (streams->next != 0)
1168 	    streams = streams->next;
1169 	streams->next = saved_streams;
1170 	saved_streams->prev = streams;
1171     }
1172 }
1173 
/*
 * Set (to_new = true) or clear (to_new = false) the l_new attribute on
 * every full-size ref in one allocated block of refs.  obj points just
 * past the object header; *psize receives the block's content size in
 * bytes.  Returns 0, or e_Fatal if the block is not a ref block.
 */
static inline int
mark_allocated(void *obj, bool to_new, uint *psize)
{
    obj_header_t *pre = (obj_header_t *)obj - 1;
    uint size = pre_obj_contents_size(pre);
    ref_packed *prp = (ref_packed *) (pre + 1);
    ref_packed *next = (ref_packed *) ((char *)prp + size);
#ifdef ALIGNMENT_ALIASING_BUG
		ref *rpref;
# define RP_REF(rp) (rpref = (ref *)rp, rpref)
#else
# define RP_REF(rp) ((ref *)rp)
#endif

    if (pre->o_type != &st_refs) {
	/* Must not happen. */
	if_debug0('u', "Wrong object type when expected a ref.\n");
	return_error(e_Fatal);
    }
    /* We know that every block of refs ends with */
    /* a full-size ref, so we only need the end check */
    /* when we encounter one of those. */
    /* (Packed refs just advance by one word and rely on a trailing */
    /* full-size ref to terminate the loop.) */
    if (to_new)
	while (1) {
	    if (r_is_packed(prp))
		prp++;
	    else {
		RP_REF(prp)->tas.type_attrs |= l_new;
		prp += packed_per_ref;
		if (prp >= next)
		    break;
	    }
    } else
	while (1) {
	    if (r_is_packed(prp))
		prp++;
	    else {
		RP_REF(prp)->tas.type_attrs &= ~l_new;
		prp += packed_per_ref;
		if (prp >= next)
		    break;
	    }
	}
#undef RP_REF
    *psize = size;
    return 0;
}
1221 
/* Check if a block contains refs marked by garbager. */
/* Returns true as soon as any packed ref carries the pmark bit or any */
/* full-size ref carries l_mark; false if the whole block is unmarked. */
static bool
check_l_mark(void *obj)
{
    obj_header_t *pre = (obj_header_t *)obj - 1;
    uint size = pre_obj_contents_size(pre);
    ref_packed *prp = (ref_packed *) (pre + 1);
    ref_packed *next = (ref_packed *) ((char *)prp + size);
#ifdef ALIGNMENT_ALIASING_BUG
		ref *rpref;
# define RP_REF(rp) (rpref = (ref *)rp, rpref)
#else
# define RP_REF(rp) ((ref *)rp)
#endif

    /* We know that every block of refs ends with */
    /* a full-size ref, so we only need the end check */
    /* when we encounter one of those. */
    while (1) {
	if (r_is_packed(prp)) {
	    if (r_has_pmark(prp))
		return true;
	    prp++;
	} else {
	    if (r_has_attr(RP_REF(prp), l_mark))
		return true;
	    prp += packed_per_ref;
	    if (prp >= next)
		return false;
	}
    }
#undef RP_REF
}
1255 
/* Set or reset the l_new attribute in every relevant slot. */
/* This includes every slot on the current change chain, */
/* and every (ref) slot allocated at this save level. */
/* Return the number of bytes of data scanned. */
static int
save_set_new(gs_ref_memory_t * mem, bool to_new, bool set_limit, ulong *pscanned)
{
    ulong scanned = 0;
    int code;

    /* Handle the change chain. */
    code = save_set_new_changes(mem, to_new, set_limit);
    if (code < 0)
	return code;

    /* Handle newly allocated ref objects. */
    SCAN_MEM_CHUNKS(mem, cp) {
	if (cp->has_refs) {
	    /* NOTE(review): has_refs is never set true below, so each */
	    /* scanned chunk has its has_refs flag cleared — confirm */
	    /* this is the intended invalidation. */
	    bool has_refs = false;

	    SCAN_CHUNK_OBJECTS(cp)
		DO_ALL
		/* 'size' here is the object size supplied by the scan */
		/* macro; the inner 'size' below shadows it. */
		if_debug3('U', "[U]set_new scan(0x%lx(%u), %d)\n",
			  (ulong) pre, size, to_new);
	    if (pre->o_type == &st_refs) {
		/* These are refs, scan them. */
		ref_packed *prp = (ref_packed *) (pre + 1);
		uint size;

		code = mark_allocated(prp, to_new, &size);
		if (code < 0)
		    return code;
		scanned += size;
	    } else
		scanned += sizeof(obj_header_t);
	    END_OBJECTS_SCAN
		cp->has_refs = has_refs;
	}
    }
    END_CHUNKS_SCAN
	if_debug2('u', "[u]set_new (%s) scanned %ld\n",
		  (to_new ? "restore" : "save"), scanned);
    *pscanned = scanned;
    return 0;
}
1301 
/* Drop redundant elements from the changes list and set l_new. */
/* Two passes: the first reverses the list while setting l_new on every */
/* full-size changed ref; the second reverses it back, and any ref seen */
/* WITHOUT l_new on the second pass must occur earlier (now later) in the */
/* chain again — the duplicate entry is freed. */
static void
drop_redundant_changes(gs_ref_memory_t * mem)
{
    register alloc_change_t *chp = mem->changes, *chp_back = NULL, *chp_forth;

    /* First reverse the list and set all. */
    for (; chp; chp = chp_forth) {
	chp_forth = chp->next;
	if (chp->offset != AC_OFFSET_ALLOCATED) {
	    ref_packed *prp = chp->where;

	    if (!r_is_packed(prp)) {
		ref *const rp = (ref *)prp;

		rp->tas.type_attrs |= l_new;
	    }
	}
	chp->next = chp_back;
	chp_back = chp;
    }
    mem->changes = chp_back;
    chp_back = NULL;
    /* Then filter, reset and reverse again. */
    for (chp = mem->changes; chp; chp = chp_forth) {
	chp_forth = chp->next;
	if (chp->offset != AC_OFFSET_ALLOCATED) {
	    ref_packed *prp = chp->where;

	    if (!r_is_packed(prp)) {
		ref *const rp = (ref *)prp;

		if ((rp->tas.type_attrs & l_new) == 0) {
		    /* Duplicate entry: unhook it, keeping the scan_limit */
		    /* and list head pointers valid. */
		    if (mem->scan_limit == chp)
			mem->scan_limit = chp_back;
		    if (mem->changes == chp)
			mem->changes = chp_back;
	    	    gs_free_object((gs_memory_t *)mem, chp, "alloc_save_remove");
		    continue;
		} else
		    rp->tas.type_attrs &= ~l_new;
	    }
	}
	chp->next = chp_back;
	chp_back = chp;
    }
    mem->changes = chp_back;
}
1350 
1351 
/* Set or reset the l_new attribute on the changes chain. */
/* When set_limit is true, track how much data was scanned and remember a */
/* scan_limit entry so later scans can stop early instead of re-walking */
/* the whole chain (bounded by max_repeated_scan). */
static int
save_set_new_changes(gs_ref_memory_t * mem, bool to_new, bool set_limit)
{
    register alloc_change_t *chp;
    register uint new = (to_new ? l_new : 0);
    ulong scanned = 0;

    /* If repeated scans have become too expensive, compact the chain */
    /* by dropping redundant entries first. */
    if (!to_new && mem->total_scanned_after_compacting > max_repeated_scan * 16) {
	mem->total_scanned_after_compacting = 0;
	drop_redundant_changes(mem);
    }
    for (chp = mem->changes; chp; chp = chp->next) {
	if (chp->offset == AC_OFFSET_ALLOCATED) {
	    /* Allocation record: mark/unmark every ref in the block. */
	    if (chp->where != 0) {
		uint size;
		int code = mark_allocated((void *)chp->where, to_new, &size);

		if (code < 0)
		    return code;
		scanned += size;
	    }
	} else {
	    ref_packed *prp = chp->where;

	    if_debug3('U', "[U]set_new 0x%lx: (0x%lx, %d)\n",
		    (ulong)chp, (ulong)prp, new);
	    if (!r_is_packed(prp)) {
		ref *const rp = (ref *) prp;

		/* Replace the l_new bit with the requested value. */
		rp->tas.type_attrs =
		    (rp->tas.type_attrs & ~l_new) + new;
	    }
	}
	/* Entries beyond scan_limit were handled by a previous scan. */
	if (mem->scan_limit == chp)
	    break;
    }
    if (set_limit) {
        mem->total_scanned_after_compacting += scanned;
	if (scanned  + mem->total_scanned >= max_repeated_scan) {
	    mem->scan_limit = mem->changes;
	    mem->total_scanned = 0;
	} else
	    mem->total_scanned += scanned;
    }
    return 0;
}
1399