/* Copyright (C) 1995, 2000 artofcode LLC.  All rights reserved.

   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 2 of the License, or (at your
   option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   59 Temple Place, Suite 330, Boston, MA, 02111-1307.

*/

/*$Id: gsalloc.c,v 1.11.2.2.2.1 2003/01/17 00:49:02 giles Exp $ */
/* Standard memory allocator */
#include "gx.h"
#include "memory_.h"
#include "gserrors.h"
#include "gsexit.h"
#include "gsmdebug.h"
#include "gsstruct.h"
#include "gxalloc.h"
#include "stream.h"		/* for clearing stream list */

/*
 * This allocator produces tracing messages of the form
 *      [aNMOTS]...
 * where
 *   N is the VM space number, +1 if we are allocating from stable memory.
 *   M is : for movable objects, | for immovable,
 *   O is {alloc = +, free = -, grow = >, shrink = <},
 *   T is {bytes = b, object = <, ref = $, string = >}, and
 *   S is {small freelist = f, large freelist = F, LIFO = space,
 *      own chunk = L, lost = #, lost own chunk = ~, other = .}.
 */
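/*
 * For example, the (invented, illustrative) message
 *      [a2:+<f]gs_alloc_struct gs_state(120) = 0x8a3f10
 * decodes as: VM space 2, a movable (:) allocation (+) of a struct
 * object (<) satisfied from a small freelist (f), requested by the
 * client named "gs_alloc_struct" for a gs_state of 120 bytes, placed
 * at address 0x8a3f10.
 */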
#ifdef DEBUG
private int
alloc_trace_space(const gs_ref_memory_t *imem)
{
    return imem->space + (imem->stable_memory == (const gs_memory_t *)imem);
}
private void
alloc_trace(const char *chars, gs_ref_memory_t * imem, client_name_t cname,
            gs_memory_type_ptr_t stype, uint size, const void *ptr)
{
    if_debug7('A', "[a%d%s]%s %s(%u) %s0x%lx\n",
              alloc_trace_space(imem), chars, client_name_string(cname),
              (ptr == 0 || stype == 0 ? "" :
               struct_type_name_string(stype)),
              size, (chars[1] == '+' ? "= " : ""), (ulong) ptr);
}
private bool
alloc_size_is_ok(gs_memory_type_ptr_t stype)
{
    return (stype->ssize > 0 && stype->ssize < 0x100000);
}
#  define ALLOC_CHECK_SIZE(stype)\
  BEGIN\
    if (!alloc_size_is_ok(stype)) {\
        lprintf2("size of struct type 0x%lx is 0x%lx!\n",\
                 (ulong)(stype), (ulong)((stype)->ssize));\
        return 0;\
    }\
  END
#else
#  define alloc_trace(chars, imem, cname, stype, size, ptr) DO_NOTHING
#  define ALLOC_CHECK_SIZE(stype) DO_NOTHING
#endif

/*
 * The structure descriptor for allocators.  Even though allocators
 * are allocated outside GC space, they reference objects within it.
 */
public_st_ref_memory();
private
ENUM_PTRS_BEGIN(ref_memory_enum_ptrs) return 0;
ENUM_PTR3(0, gs_ref_memory_t, streams, names_array, changes);
ENUM_PTR(3, gs_ref_memory_t, saved);
ENUM_PTRS_END
private RELOC_PTRS_WITH(ref_memory_reloc_ptrs, gs_ref_memory_t *mptr)
{
    RELOC_PTR(gs_ref_memory_t, streams);
    RELOC_PTR(gs_ref_memory_t, names_array);
    RELOC_PTR(gs_ref_memory_t, changes);
    /* Don't relocate the saved pointer now -- see igc.c for details. */
    mptr->reloc_saved = RELOC_OBJ(mptr->saved);
}
RELOC_PTRS_END

/*
 * Define the flags for alloc_obj, which implements all but the fastest
 * case of allocation.
 */
typedef enum {
    ALLOC_IMMOVABLE = 1,
    ALLOC_DIRECT = 2		/* called directly, without fast-case checks */
} alloc_flags_t;

/* Forward references */
private void remove_range_from_freelist(P3(gs_ref_memory_t *mem, void *bottom, void *top));
private obj_header_t *large_freelist_alloc(P2(gs_ref_memory_t *mem, uint size));
private obj_header_t *scavenge_low_free(P2(gs_ref_memory_t *mem, unsigned request_size));
private ulong compute_free_objects(P1(gs_ref_memory_t *));
private obj_header_t *alloc_obj(P5(gs_ref_memory_t *, ulong, gs_memory_type_ptr_t, alloc_flags_t, client_name_t));
private void consolidate_chunk_free(P2(chunk_t *cp, gs_ref_memory_t *mem));
private void trim_obj(P4(gs_ref_memory_t *mem, obj_header_t *obj, uint size, chunk_t *cp));
private chunk_t *alloc_acquire_chunk(P4(gs_ref_memory_t *, ulong, bool, client_name_t));
private chunk_t *alloc_add_chunk(P3(gs_ref_memory_t *, ulong, client_name_t));
void alloc_close_chunk(P1(gs_ref_memory_t *));

/*
 * Define the standard implementation (with garbage collection)
 * of Ghostscript's memory manager interface.
 */
/* Raw memory procedures */
private gs_memory_proc_alloc_bytes(i_alloc_bytes_immovable);
private gs_memory_proc_resize_object(i_resize_object);
private gs_memory_proc_free_object(i_free_object);
private gs_memory_proc_stable(i_stable);
private gs_memory_proc_status(i_status);
private gs_memory_proc_free_all(i_free_all);
private gs_memory_proc_consolidate_free(i_consolidate_free);

/* Object memory procedures */
private gs_memory_proc_alloc_bytes(i_alloc_bytes);
private gs_memory_proc_alloc_struct(i_alloc_struct);
private gs_memory_proc_alloc_struct(i_alloc_struct_immovable);
private gs_memory_proc_alloc_byte_array(i_alloc_byte_array);
private gs_memory_proc_alloc_byte_array(i_alloc_byte_array_immovable);
private gs_memory_proc_alloc_struct_array(i_alloc_struct_array);
private gs_memory_proc_alloc_struct_array(i_alloc_struct_array_immovable);
private gs_memory_proc_object_size(i_object_size);
private gs_memory_proc_object_type(i_object_type);
private gs_memory_proc_alloc_string(i_alloc_string);
private gs_memory_proc_alloc_string(i_alloc_string_immovable);
private gs_memory_proc_resize_string(i_resize_string);
private gs_memory_proc_free_string(i_free_string);
private gs_memory_proc_register_root(i_register_root);
private gs_memory_proc_unregister_root(i_unregister_root);
private gs_memory_proc_enable_free(i_enable_free);

/* We export the procedures for subclasses. */
const gs_memory_procs_t gs_ref_memory_procs =
{
    /* Raw memory procedures */
    i_alloc_bytes_immovable,
    i_resize_object,
    i_free_object,
    i_stable,
    i_status,
    i_free_all,
    i_consolidate_free,
    /* Object memory procedures */
    i_alloc_bytes,
    i_alloc_struct,
    i_alloc_struct_immovable,
    i_alloc_byte_array,
    i_alloc_byte_array_immovable,
    i_alloc_struct_array,
    i_alloc_struct_array_immovable,
    i_object_size,
    i_object_type,
    i_alloc_string,
    i_alloc_string_immovable,
    i_resize_string,
    i_free_string,
    i_register_root,
    i_unregister_root,
    i_enable_free
};

/*
 * Allocate and mostly initialize the state of an allocator (system, global,
 * or local).  Does not initialize global or space.
 */
private void *ialloc_solo(P3(gs_raw_memory_t *, gs_memory_type_ptr_t,
                             chunk_t **));
gs_ref_memory_t *
ialloc_alloc_state(gs_raw_memory_t * parent, uint chunk_size)
{
    chunk_t *cp;
    gs_ref_memory_t *iimem = ialloc_solo(parent, &st_ref_memory, &cp);

    if (iimem == 0)
        return 0;
    iimem->stable_memory = (gs_memory_t *)iimem;
    iimem->procs = gs_ref_memory_procs;
    iimem->parent = parent;
    iimem->chunk_size = chunk_size;
    iimem->large_size = ((chunk_size / 4) & -obj_align_mod) + 1;
    iimem->is_controlled = false;
    iimem->gc_status.vm_threshold = chunk_size * 3L;
    iimem->gc_status.max_vm = 0x7fffffff;
    iimem->gc_status.psignal = NULL;
    iimem->gc_status.signal_value = 0;
    iimem->gc_status.enabled = false;
    iimem->gc_status.requested = 0;
    iimem->gc_allocated = 0;
    iimem->previous_status.allocated = 0;
    iimem->previous_status.used = 0;
    ialloc_reset(iimem);
    iimem->cfirst = iimem->clast = cp;
    ialloc_set_limit(iimem);
    iimem->cc.cbot = iimem->cc.ctop = 0;
    iimem->pcc = 0;
    iimem->save_level = 0;
    iimem->new_mask = 0;
    iimem->test_mask = ~0;
    iimem->streams = 0;
    iimem->names_array = 0;
    iimem->roots = 0;
    iimem->num_contexts = 0;
    iimem->saved = 0;
    return iimem;
}
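
/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * create an allocator on top of a raw-memory parent and allocate from it.
 * The parent pointer and the 20000-byte chunk size are assumptions.
 */
#if 0
private void
example_create_allocator(gs_raw_memory_t *parent /* assumed available */)
{
    gs_ref_memory_t *imem = ialloc_alloc_state(parent, 20000);
    byte *data;

    if (imem == 0)
        return;			/* allocating the allocator itself failed */
    /* Per the comment above, the caller must still initialize the
       space and the global/local linkage before real use. */
    data = gs_alloc_bytes((gs_memory_t *)imem, 100, "example");
    gs_free_object((gs_memory_t *)imem, data, "example");
}
#endif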

/* Allocate a 'solo' object with its own chunk. */
private void *
ialloc_solo(gs_raw_memory_t * parent, gs_memory_type_ptr_t pstype,
            chunk_t ** pcp)
{	/*
	 * We can't assume that the parent uses the same object header
	 * that we do, but the GC requires that allocators have
	 * such a header.  Therefore, we prepend one explicitly.
	 */
    chunk_t *cp =
        gs_raw_alloc_struct_immovable(parent, &st_chunk,
                                      "ialloc_solo(chunk)");
    uint csize =
        ROUND_UP(sizeof(chunk_head_t) + sizeof(obj_header_t) +
                 pstype->ssize,
                 obj_align_mod);
    byte *cdata = gs_alloc_bytes_immovable(parent, csize, "ialloc_solo");
    obj_header_t *obj = (obj_header_t *) (cdata + sizeof(chunk_head_t));

    if (cp == 0 || cdata == 0)
        return 0;
    alloc_init_chunk(cp, cdata, cdata + csize, false, (chunk_t *) NULL);
    cp->cbot = cp->ctop;
    cp->cprev = cp->cnext = 0;
    /* Construct the object header "by hand". */
    obj->o_alone = 1;
    obj->o_size = pstype->ssize;
    obj->o_type = pstype;
    *pcp = cp;
    return (void *)(obj + 1);
}
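
/*
 * A sketch of the resulting solo chunk layout (sizes rounded up to
 * obj_align_mod):
 *
 *	cdata                                      cdata + csize
 *	  | chunk_head_t | obj_header_t | object body ... |
 *	                                ^-- pointer returned to the caller
 */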

/*
 * Add a chunk to an externally controlled allocator.  Such allocators
 * allocate all objects as immovable, are not garbage-collected, and
 * don't attempt to acquire additional memory on their own.
 */
int
ialloc_add_chunk(gs_ref_memory_t *imem, ulong space, client_name_t cname)
{
    chunk_t *cp;

    /* Allow acquisition of this chunk. */
    imem->is_controlled = false;
    imem->large_size = imem->chunk_size;
    imem->limit = max_long;
    imem->gc_status.max_vm = max_long;

    /* Acquire the chunk. */
    cp = alloc_add_chunk(imem, space, cname);

    /*
     * Make all allocations immovable.  Since the "movable" allocators
     * allocate within existing chunks, whereas the "immovable" ones
     * allocate in new chunks, we equate the latter to the former, even
     * though this seems backwards.
     */
    imem->procs.alloc_bytes_immovable = imem->procs.alloc_bytes;
    imem->procs.alloc_struct_immovable = imem->procs.alloc_struct;
    imem->procs.alloc_byte_array_immovable = imem->procs.alloc_byte_array;
    imem->procs.alloc_struct_array_immovable = imem->procs.alloc_struct_array;
    imem->procs.alloc_string_immovable = imem->procs.alloc_string;

    /* Disable acquisition of additional chunks. */
    imem->is_controlled = true;
    imem->limit = 0;

    return (cp ? 0 : gs_note_error(gs_error_VMerror));
}
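
/*
 * Illustrative sketch (the caller and chunk size are assumptions): give
 * a controlled allocator one fixed chunk to carve allocations from.
 */
#if 0
private int
example_make_controlled(gs_ref_memory_t *imem, ulong chunk_bytes)
{
    int code = ialloc_add_chunk(imem, chunk_bytes, "example_make_controlled");

    if (code < 0)
        return code;		/* gs_error_VMerror: chunk acquisition failed */
    /* From here on, all allocations are immovable and come from this
       chunk; running out of room fails rather than growing. */
    return 0;
}
#endif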

/* Prepare for a GC by clearing the stream list. */
/* This probably belongs somewhere else.... */
void
ialloc_gc_prepare(gs_ref_memory_t * mem)
{	/*
	 * We have to unlink every stream from its neighbors,
	 * so that referenced streams don't keep all streams around.
	 */
    while (mem->streams != 0) {
        stream *s = mem->streams;

        mem->streams = s->next;
        s->prev = s->next = 0;
    }
}

/* Initialize after a save. */
void
ialloc_reset(gs_ref_memory_t * mem)
{
    mem->cfirst = 0;
    mem->clast = 0;
    mem->cc.rcur = 0;
    mem->cc.rtop = 0;
    mem->cc.has_refs = false;
    mem->allocated = 0;
    mem->inherited = 0;
    mem->changes = 0;
    ialloc_reset_free(mem);
}

/* Initialize after a save or GC. */
void
ialloc_reset_free(gs_ref_memory_t * mem)
{
    int i;
    obj_header_t **p;

    mem->lost.objects = 0;
    mem->lost.refs = 0;
    mem->lost.strings = 0;
    mem->cfreed.cp = 0;
    for (i = 0, p = &mem->freelists[0]; i < num_freelists; i++, p++)
        *p = 0;
    mem->largest_free_size = 0;
}

/*
 * Set an arbitrary limit so that the amount of allocated VM does not grow
 * indefinitely even when GC is disabled.  Benchmarks have shown that
 * the resulting GCs are infrequent enough not to degrade performance
 * significantly.
 */
#define FORCE_GC_LIMIT 8000000

/* Set the allocation limit after a change in one or more of */
/* vm_threshold, max_vm, or enabled, or after a GC. */
void
ialloc_set_limit(register gs_ref_memory_t * mem)
{	/*
	 * The following code is intended to set the limit so that
	 * we stop allocating when allocated + previous_status.allocated
	 * exceeds the lesser of max_vm or (if GC is enabled)
	 * gc_allocated + vm_threshold.
	 */
    ulong max_allocated =
        (mem->gc_status.max_vm > mem->previous_status.allocated ?
         mem->gc_status.max_vm - mem->previous_status.allocated :
         0);

    if (mem->gc_status.enabled) {
        ulong limit = mem->gc_allocated + mem->gc_status.vm_threshold;

        if (limit < mem->previous_status.allocated)
            mem->limit = 0;
        else {
            limit -= mem->previous_status.allocated;
            mem->limit = min(limit, max_allocated);
        }
    } else
        mem->limit = min(max_allocated, mem->gc_allocated + FORCE_GC_LIMIT);
    if_debug7('0', "[0]space=%d, max_vm=%ld, prev.alloc=%ld, enabled=%d,\n\
	gc_alloc=%ld, threshold=%ld => limit=%ld\n",
              mem->space, (long)mem->gc_status.max_vm,
              (long)mem->previous_status.allocated,
              mem->gc_status.enabled, (long)mem->gc_allocated,
              (long)mem->gc_status.vm_threshold, (long)mem->limit);
}
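
/*
 * Worked example (made-up numbers): with GC enabled,
 * gc_allocated = 5,000,000, vm_threshold = 1,000,000,
 * previous_status.allocated = 2,000,000, and max_vm = 0x7fffffff:
 *	limit = min((5,000,000 + 1,000,000) - 2,000,000,
 *	            max_vm - 2,000,000) = 4,000,000
 * so allocation stops (and a GC is requested) once this level's
 * 'allocated' reaches 4,000,000 bytes.
 */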

/*
 * Free all the memory owned by the allocator, except the allocator itself.
 * Note that this only frees memory at the current save level: the client
 * is responsible for restoring to the outermost level if desired.
 */
private void
i_free_all(gs_memory_t * mem, uint free_mask, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    chunk_t *cp;

    if (free_mask & FREE_ALL_DATA) {
        chunk_t *csucc;

        /*
         * Free the chunks in reverse order, to encourage LIFO behavior.
         * Don't free the chunk holding the allocator itself.
         */
        for (cp = imem->clast; cp != 0; cp = csucc) {
            csucc = cp->cprev;	/* save before freeing */
            if (cp->cbase + sizeof(obj_header_t) != (byte *)mem)
                alloc_free_chunk(cp, imem);
        }
    }
    if (free_mask & FREE_ALL_ALLOCATOR) {
        /* Free the chunk holding the allocator itself. */
        for (cp = imem->clast; cp != 0; cp = cp->cprev)
            if (cp->cbase + sizeof(obj_header_t) == (byte *)mem) {
                alloc_free_chunk(cp, imem);
                break;
            }
    }
}

/* ================ Accessors ================ */

/* Get the size of an object from the header. */
private uint
i_object_size(gs_memory_t * mem, const void /*obj_header_t */ *obj)
{
    return pre_obj_contents_size((const obj_header_t *)obj - 1);
}

/* Get the type of a structure from the header. */
private gs_memory_type_ptr_t
i_object_type(gs_memory_t * mem, const void /*obj_header_t */ *obj)
{
    return ((const obj_header_t *)obj - 1)->o_type;
}

/* Get the GC status of a memory. */
void
gs_memory_gc_status(const gs_ref_memory_t * mem, gs_memory_gc_status_t * pstat)
{
    *pstat = mem->gc_status;
}

/* Set the GC status of a memory. */
void
gs_memory_set_gc_status(gs_ref_memory_t * mem, const gs_memory_gc_status_t * pstat)
{
    mem->gc_status = *pstat;
    ialloc_set_limit(mem);
}

/* ================ Objects ================ */

/* Allocate a small object quickly if possible. */
/* The size must be substantially less than max_uint. */
/* ptr must be declared as obj_header_t *. */
/* pfl must be declared as obj_header_t **. */
#define IF_FREELIST_ALLOC(ptr, imem, size, pstype, pfl)\
	if ( size <= max_freelist_size &&\
	     *(pfl = &imem->freelists[(size + obj_align_mask) >> log2_obj_align_mod]) != 0\
	   )\
	{	ptr = *pfl;\
		*pfl = *(obj_header_t **)ptr;\
		ptr[-1].o_size = size;\
		ptr[-1].o_type = pstype;\
		/* If debugging, clear the block in an attempt to */\
		/* track down uninitialized data errors. */\
		gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
#define ELSEIF_BIG_FREELIST_ALLOC(ptr, imem, size, pstype)\
	}\
	else if (size > max_freelist_size &&\
		 (ptr = large_freelist_alloc(imem, size)) != 0)\
	{	ptr[-1].o_type = pstype;\
		/* If debugging, clear the block in an attempt to */\
		/* track down uninitialized data errors. */\
		gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
#define ELSEIF_LIFO_ALLOC(ptr, imem, size, pstype)\
	}\
	else if ( (imem->cc.ctop - (byte *)(ptr = (obj_header_t *)imem->cc.cbot))\
		  >= size + (obj_align_mod + sizeof(obj_header_t) * 2) &&\
		  size < imem->large_size\
	   )\
	{	imem->cc.cbot = (byte *)ptr + obj_size_round(size);\
		ptr->o_alone = 0;\
		ptr->o_size = size;\
		ptr->o_type = pstype;\
		ptr++;\
		/* If debugging, clear the block in an attempt to */\
		/* track down uninitialized data errors. */\
		gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
#define ELSE_ALLOC\
	}\
	else
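
/*
 * Worked example of the freelist index computation above (assuming
 * obj_align_mod = 8, so obj_align_mask = 7 and log2_obj_align_mod = 3):
 * a request of size 20 maps to freelists[(20 + 7) >> 3] = freelists[3],
 * the list of free blocks of 17..24 bytes.  Requests larger than
 * max_freelist_size go to the single LARGE_FREELIST_INDEX list instead.
 */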

private byte *
i_alloc_bytes(gs_memory_t * mem, uint size, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj;
    obj_header_t **pfl;

    IF_FREELIST_ALLOC(obj, imem, size, &st_bytes, pfl)
        alloc_trace(":+bf", imem, cname, NULL, size, obj);
    ELSEIF_BIG_FREELIST_ALLOC(obj, imem, size, &st_bytes)
        alloc_trace(":+bF", imem, cname, NULL, size, obj);
    ELSEIF_LIFO_ALLOC(obj, imem, size, &st_bytes)
        alloc_trace(":+b ", imem, cname, NULL, size, obj);
    ELSE_ALLOC
    {
        obj = alloc_obj(imem, size, &st_bytes, 0, cname);
        if (obj == 0)
            return 0;
        alloc_trace(":+b.", imem, cname, NULL, size, obj);
    }
    return (byte *) obj;
}
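
/*
 * Client-level usage sketch (illustrative): allocation normally goes
 * through the gs_alloc_bytes / gs_free_object macros in gsmemory.h,
 * which dispatch to i_alloc_bytes / i_free_object via mem->procs.
 */
#if 0
private int
example_use_bytes(gs_memory_t *mem)
{
    byte *buf = gs_alloc_bytes(mem, 100, "example buffer");

    if (buf == 0)
        return_error(gs_error_VMerror);
    memset(buf, 0, 100);
    gs_free_object(mem, buf, "example buffer");
    return 0;
}
#endif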
private byte *
i_alloc_bytes_immovable(gs_memory_t * mem, uint size, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj = alloc_obj(imem, size, &st_bytes,
                                  ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);

    if (obj == 0)
        return 0;
    alloc_trace("|+b.", imem, cname, NULL, size, obj);
    return (byte *) obj;
}
private void *
i_alloc_struct(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
               client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    uint size = pstype->ssize;
    obj_header_t *obj;
    obj_header_t **pfl;

    ALLOC_CHECK_SIZE(pstype);
    IF_FREELIST_ALLOC(obj, imem, size, pstype, pfl)
        alloc_trace(":+<f", imem, cname, pstype, size, obj);
    ELSEIF_BIG_FREELIST_ALLOC(obj, imem, size, pstype)
        alloc_trace(":+<F", imem, cname, pstype, size, obj);
    ELSEIF_LIFO_ALLOC(obj, imem, size, pstype)
        alloc_trace(":+< ", imem, cname, pstype, size, obj);
    ELSE_ALLOC
    {
        obj = alloc_obj(imem, size, pstype, 0, cname);
        if (obj == 0)
            return 0;
        alloc_trace(":+<.", imem, cname, pstype, size, obj);
    }
    return obj;
}
private void *
i_alloc_struct_immovable(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
                         client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    uint size = pstype->ssize;
    obj_header_t *obj;

    ALLOC_CHECK_SIZE(pstype);
    obj = alloc_obj(imem, size, pstype, ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);
    alloc_trace("|+<.", imem, cname, pstype, size, obj);
    return obj;
}
private byte *
i_alloc_byte_array(gs_memory_t * mem, uint num_elements, uint elt_size,
                   client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj = alloc_obj(imem, (ulong) num_elements * elt_size,
                                  &st_bytes, ALLOC_DIRECT, cname);

    if_debug6('A', "[a%d:+b.]%s -bytes-*(%lu=%u*%u) = 0x%lx\n",
              alloc_trace_space(imem), client_name_string(cname),
              (ulong) num_elements * elt_size,
              num_elements, elt_size, (ulong) obj);
    return (byte *) obj;
}
private byte *
i_alloc_byte_array_immovable(gs_memory_t * mem, uint num_elements,
                             uint elt_size, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj = alloc_obj(imem, (ulong) num_elements * elt_size,
                                  &st_bytes, ALLOC_IMMOVABLE | ALLOC_DIRECT,
                                  cname);

    if_debug6('A', "[a%d|+b.]%s -bytes-*(%lu=%u*%u) = 0x%lx\n",
              alloc_trace_space(imem), client_name_string(cname),
              (ulong) num_elements * elt_size,
              num_elements, elt_size, (ulong) obj);
    return (byte *) obj;
}
private void *
i_alloc_struct_array(gs_memory_t * mem, uint num_elements,
                     gs_memory_type_ptr_t pstype, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj;

    ALLOC_CHECK_SIZE(pstype);
    obj = alloc_obj(imem,
                    (ulong) num_elements * pstype->ssize,
                    pstype, ALLOC_DIRECT, cname);
    if_debug7('A', "[a%d:+<.]%s %s*(%lu=%u*%u) = 0x%lx\n",
              alloc_trace_space(imem), client_name_string(cname),
              struct_type_name_string(pstype),
              (ulong) num_elements * pstype->ssize,
              num_elements, pstype->ssize, (ulong) obj);
    return (char *)obj;
}
private void *
i_alloc_struct_array_immovable(gs_memory_t * mem, uint num_elements,
                               gs_memory_type_ptr_t pstype, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj;

    ALLOC_CHECK_SIZE(pstype);
    obj = alloc_obj(imem,
                    (ulong) num_elements * pstype->ssize,
                    pstype, ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);
    if_debug7('A', "[a%d|+<.]%s %s*(%lu=%u*%u) = 0x%lx\n",
              alloc_trace_space(imem), client_name_string(cname),
              struct_type_name_string(pstype),
              (ulong) num_elements * pstype->ssize,
              num_elements, pstype->ssize, (ulong) obj);
    return (char *)obj;
}
private void *
i_resize_object(gs_memory_t * mem, void *obj, uint new_num_elements,
                client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *pp = (obj_header_t *) obj - 1;
    gs_memory_type_ptr_t pstype = pp->o_type;
    ulong old_size = pre_obj_contents_size(pp);
    ulong new_size = (ulong) pstype->ssize * new_num_elements;
    ulong old_size_rounded = obj_align_round(old_size);
    ulong new_size_rounded = obj_align_round(new_size);
    void *new_obj = NULL;

    if (old_size_rounded == new_size_rounded) {
        pp->o_size = new_size;
        new_obj = obj;
    } else if ((byte *)obj + old_size_rounded == imem->cc.cbot &&
               imem->cc.ctop - (byte *)obj >= new_size_rounded) {
        imem->cc.cbot = (byte *)obj + new_size_rounded;
        pp->o_size = new_size;
        new_obj = obj;
    } else if (new_size_rounded + sizeof(obj_header_t) <= old_size_rounded) {
        /* Try to trim the object -- but only if there is room for a
           dummy header. */
        trim_obj(imem, obj, new_size, (chunk_t *)0);
        new_obj = obj;
    }
    if (new_obj) {
        if_debug8('A', "[a%d:%c%c ]%s %s(%lu=>%lu) 0x%lx\n",
                  alloc_trace_space(imem),
                  (new_size > old_size ? '>' : '<'),
                  (pstype == &st_bytes ? 'b' : '<'),
                  client_name_string(cname),
                  struct_type_name_string(pstype),
                  old_size, new_size, (ulong) obj);
        return new_obj;
    }
    /* Punt. */
    new_obj = gs_alloc_struct_array(mem, new_num_elements, void,
                                    pstype, cname);
    if (new_obj == 0)
        return 0;
    memcpy(new_obj, obj, min(old_size, new_size));
    gs_free_object(mem, obj, cname);
    return new_obj;
}
private void
i_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *pp;
    gs_memory_type_ptr_t pstype;

    struct_proc_finalize((*finalize));
    uint size, rounded_size;

    if (ptr == 0)
        return;
    pp = (obj_header_t *) ptr - 1;
    pstype = pp->o_type;
#ifdef DEBUG
    if (gs_debug_c('?')) {
        chunk_locator_t cld;

        if (pstype == &st_free) {
            lprintf2("%s: object 0x%lx already free!\n",
                     client_name_string(cname), (ulong) ptr);
            return;		/*gs_abort(); */
        }
        /* Check that this allocator owns the object being freed. */
        cld.memory = imem;
        while ((cld.cp = cld.memory->clast),
               !chunk_locate_ptr(ptr, &cld)
            ) {
            if (!cld.memory->saved) {
                lprintf3("%s: freeing 0x%lx, not owned by memory 0x%lx!\n",
                         client_name_string(cname), (ulong) ptr,
                         (ulong) mem);
                return;		/*gs_abort(); */
            }
            /****** HACK: we know the saved state is the first ******
             ****** member of an alloc_save_t. ******/
            cld.memory = (gs_ref_memory_t *) cld.memory->saved;
        }
        /* Check that the object is in the allocated region. */
        if (cld.memory == imem && cld.cp == imem->pcc)
            cld.cp = &imem->cc;
        if (!(PTR_BETWEEN((const byte *)pp, cld.cp->cbase,
                          cld.cp->cbot))
            ) {
            lprintf5("%s: freeing 0x%lx,\n\toutside chunk 0x%lx cbase=0x%lx, cbot=0x%lx!\n",
                     client_name_string(cname), (ulong) ptr,
                     (ulong) cld.cp, (ulong) cld.cp->cbase,
                     (ulong) cld.cp->cbot);
            return;		/*gs_abort(); */
        }
    }
#endif
    size = pre_obj_contents_size(pp);
    rounded_size = obj_align_round(size);
    finalize = pstype->finalize;
    if (finalize != 0) {
        if_debug3('u', "[u]finalizing %s 0x%lx (%s)\n",
                  struct_type_name_string(pstype),
                  (ulong) ptr, client_name_string(cname));
        (*finalize) (ptr);
    }
    if ((byte *) ptr + rounded_size == imem->cc.cbot) {
        alloc_trace(":-o ", imem, cname, pstype, size, ptr);
        gs_alloc_fill(ptr, gs_alloc_fill_free, size);
        imem->cc.cbot = (byte *) pp;
        /* IFF this object is adjacent to (or below) the byte after the
         * highest free object, do the consolidation within this chunk. */
        if ((byte *)pp <= imem->cc.int_freed_top) {
            consolidate_chunk_free(&(imem->cc), imem);
        }
        return;
    }
    if (pp->o_alone) {
        /*
         * We gave this object its own chunk.  Free the entire chunk,
         * unless it belongs to an older save level, in which case
         * we mustn't overwrite it.
         */
        chunk_locator_t cl;

#ifdef DEBUG
        {
            chunk_locator_t cld;

            cld.memory = imem;
            cld.cp = 0;
            if (gs_debug_c('a'))
                alloc_trace(
                            (chunk_locate_ptr(ptr, &cld) ? ":-oL" : ":-o~"),
                            imem, cname, pstype, size, ptr);
        }
#endif
        cl.memory = imem;
        cl.cp = 0;
        if (chunk_locate_ptr(ptr, &cl)) {
            if (!imem->is_controlled)
                alloc_free_chunk(cl.cp, imem);
            return;
        }
        /* Don't overwrite even if gs_alloc_debug is set. */
    }
    if (rounded_size >= sizeof(obj_header_t *)) {
        /*
         * Put the object on a freelist, unless it belongs to
         * an older save level, in which case we mustn't
         * overwrite it.
         */
        imem->cfreed.memory = imem;
        if (chunk_locate(ptr, &imem->cfreed)) {
            obj_header_t **pfl;

            if (size > max_freelist_size) {
                pfl = &imem->freelists[LARGE_FREELIST_INDEX];
                if (rounded_size > imem->largest_free_size)
                    imem->largest_free_size = rounded_size;
            } else {
                pfl = &imem->freelists[(size + obj_align_mask) >>
                                       log2_obj_align_mod];
            }
            /* Keep track of the highest object on a freelist. */
            if ((byte *)pp >= imem->cc.int_freed_top)
                imem->cc.int_freed_top = (byte *)ptr + rounded_size;
            pp->o_type = &st_free;	/* don't confuse GC */
            gs_alloc_fill(ptr, gs_alloc_fill_free, size);
            *(obj_header_t **) ptr = *pfl;
            *pfl = (obj_header_t *) ptr;
            alloc_trace((size > max_freelist_size ? ":-oF" : ":-of"),
                        imem, cname, pstype, size, ptr);
            return;
        }
        /* Don't overwrite even if gs_alloc_debug is set. */
    } else {
        pp->o_type = &st_free;	/* don't confuse GC */
        gs_alloc_fill(ptr, gs_alloc_fill_free, size);
    }
    alloc_trace(":-o#", imem, cname, pstype, size, ptr);
    imem->lost.objects += obj_size_round(size);
}
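
/*
 * In summary, i_free_object tries four strategies in order (a reading
 * aid, not new behavior): (1) if the object is at the top of the current
 * chunk, roll cbot back (LIFO free); (2) if it has its own chunk, free
 * the whole chunk; (3) if it is big enough to hold a freelist pointer,
 * push it onto the appropriate freelist; (4) otherwise count its space
 * as 'lost' until the next consolidation or GC.
 */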
private byte *
i_alloc_string(gs_memory_t * mem, uint nbytes, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    byte *str;

top:if (imem->cc.ctop - imem->cc.cbot > nbytes) {
        if_debug4('A', "[a%d:+> ]%s(%u) = 0x%lx\n",
                  alloc_trace_space(imem), client_name_string(cname), nbytes,
                  (ulong) (imem->cc.ctop - nbytes));
        str = imem->cc.ctop -= nbytes;
        gs_alloc_fill(str, gs_alloc_fill_alloc, nbytes);
        return str;
    }
    if (nbytes > string_space_quanta(max_uint - sizeof(chunk_head_t)) *
        string_data_quantum
        ) {			/* Can't represent the size in a uint! */
        return 0;
    }
    if (nbytes >= imem->large_size) {	/* Give it a chunk all its own. */
        return i_alloc_string_immovable(mem, nbytes, cname);
    } else {			/* Add another chunk. */
        chunk_t *cp =
            alloc_acquire_chunk(imem, (ulong) imem->chunk_size, true, "chunk");

        if (cp == 0)
            return 0;
        alloc_close_chunk(imem);
        imem->pcc = cp;
        imem->cc = *imem->pcc;
        gs_alloc_fill(imem->cc.cbase, gs_alloc_fill_free,
                      imem->cc.climit - imem->cc.cbase);
        goto top;
    }
}
private byte *
i_alloc_string_immovable(gs_memory_t * mem, uint nbytes, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    byte *str;
    /* Give it a chunk all its own. */
    uint asize = string_chunk_space(nbytes) + sizeof(chunk_head_t);
    chunk_t *cp = alloc_acquire_chunk(imem, (ulong) asize, true,
                                      "large string chunk");

    if (cp == 0)
        return 0;
    str = cp->ctop = cp->climit - nbytes;
    if_debug4('a', "[a%d|+>L]%s(%u) = 0x%lx\n",
              alloc_trace_space(imem), client_name_string(cname), nbytes,
              (ulong) str);
    gs_alloc_fill(str, gs_alloc_fill_alloc, nbytes);
    return str;
}
private byte *
i_resize_string(gs_memory_t * mem, byte * data, uint old_num, uint new_num,
                client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    byte *ptr;

    if (old_num == new_num)	/* same size returns the same string */
        return data;
    if (data == imem->cc.ctop &&	/* bottom-most string */
        (new_num < old_num ||
         imem->cc.ctop - imem->cc.cbot > new_num - old_num)
        ) {			/* Resize in place. */
        ptr = data + old_num - new_num;
        if_debug6('A', "[a%d:%c> ]%s(%u->%u) 0x%lx\n",
                  alloc_trace_space(imem),
                  (new_num > old_num ? '>' : '<'),
                  client_name_string(cname), old_num, new_num,
                  (ulong) ptr);
        imem->cc.ctop = ptr;
        memmove(ptr, data, min(old_num, new_num));
#ifdef DEBUG
        if (new_num > old_num)
            gs_alloc_fill(ptr + old_num, gs_alloc_fill_alloc,
                          new_num - old_num);
        else
            gs_alloc_fill(data, gs_alloc_fill_free, old_num - new_num);
#endif
    } else if (new_num < old_num) {
        /* Trim the string and create a free-space hole. */
        ptr = data;
        imem->lost.strings += old_num - new_num;
        gs_alloc_fill(data + new_num, gs_alloc_fill_free,
                      old_num - new_num);
        if_debug5('A', "[a%d:<> ]%s(%u->%u) 0x%lx\n",
                  alloc_trace_space(imem), client_name_string(cname),
                  old_num, new_num, (ulong)ptr);
    } else {			/* Punt. */
        ptr = gs_alloc_string(mem, new_num, cname);
        if (ptr == 0)
            return 0;
        memcpy(ptr, data, min(old_num, new_num));
        gs_free_string(mem, data, old_num, cname);
    }
    return ptr;
}

private void
i_free_string(gs_memory_t * mem, byte * data, uint nbytes,
              client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;

    if (data == imem->cc.ctop) {
        if_debug4('A', "[a%d:-> ]%s(%u) 0x%lx\n",
                  alloc_trace_space(imem), client_name_string(cname), nbytes,
                  (ulong) data);
        imem->cc.ctop += nbytes;
    } else {
        if_debug4('A', "[a%d:->#]%s(%u) 0x%lx\n",
                  alloc_trace_space(imem), client_name_string(cname), nbytes,
                  (ulong) data);
        imem->lost.strings += nbytes;
    }
    gs_alloc_fill(data, gs_alloc_fill_free, nbytes);
}

private gs_memory_t *
i_stable(gs_memory_t *mem)
{
    return mem->stable_memory;
}

private void
i_status(gs_memory_t * mem, gs_memory_status_t * pstat)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    ulong unused = imem->lost.refs + imem->lost.strings;
    ulong inner = 0;

    alloc_close_chunk(imem);
    /* Add up unallocated space within each chunk. */
    /* Also keep track of space allocated to inner chunks, */
    /* which are included in previous_status.allocated. */
    {
        const chunk_t *cp = imem->cfirst;

        while (cp != 0) {
            unused += cp->ctop - cp->cbot;
            if (cp->outer)
                inner += cp->cend - (byte *) cp->chead;
            cp = cp->cnext;
        }
    }
    unused += compute_free_objects(imem);
    pstat->used = imem->allocated + inner - unused +
        imem->previous_status.used;
    pstat->allocated = imem->allocated +
        imem->previous_status.allocated;
}

private void
i_enable_free(gs_memory_t * mem, bool enable)
{
    if (enable)
        mem->procs.free_object = i_free_object,
            mem->procs.free_string = i_free_string;
    else
        mem->procs.free_object = gs_ignore_free_object,
            mem->procs.free_string = gs_ignore_free_string;
}

/* ------ Internal procedures ------ */

/* Compute the amount of free object space by scanning free lists. */
private ulong
compute_free_objects(gs_ref_memory_t * mem)
{
    ulong unused = mem->lost.objects;
    int i;

    /* Add up space on free lists. */
    for (i = 0; i < num_freelists; i++) {
        const obj_header_t *pfree;

        for (pfree = mem->freelists[i]; pfree != 0;
             pfree = *(const obj_header_t * const *)pfree
            )
            unused += obj_align_round(pfree[-1].o_size);
    }
    return unused;
}

/* Allocate an object from the large-block freelist. */
private obj_header_t *	/* returns the object if allocated, else 0 */
large_freelist_alloc(gs_ref_memory_t *mem, uint size)
{
    /* Scan the large-object freelist.  We'll grab an object up to 1/8 */
    /* bigger right away, else use the best fit of the entire scan. */
    uint aligned_size = obj_align_round(size);
    uint aligned_min_size = aligned_size + sizeof(obj_header_t);
    uint aligned_max_size =
        aligned_min_size + obj_align_round(aligned_min_size / 8);
    obj_header_t *best_fit = 0;
    obj_header_t **best_fit_prev;
    uint best_fit_size = max_uint;
    obj_header_t *pfree;
    obj_header_t **ppfprev = &mem->freelists[LARGE_FREELIST_INDEX];
    uint largest_size = 0;

    if (aligned_size > mem->largest_free_size)
        return 0;		/* definitely no block large enough */

    while ((pfree = *ppfprev) != 0) {
        uint free_size = obj_align_round(pfree[-1].o_size);

        if (free_size == aligned_size ||
            (free_size >= aligned_min_size && free_size < best_fit_size)
            ) {
            best_fit = pfree;
            best_fit_prev = ppfprev;
            best_fit_size = pfree[-1].o_size;
            if (best_fit_size <= aligned_max_size)
                break;	/* good enough fit to spare a scan of the entire list */
        }
        ppfprev = (obj_header_t **) pfree;
        if (free_size > largest_size)
            largest_size = free_size;
    }
    if (best_fit == 0) {
        /*
         * No single free block is large enough, but since we scanned the
         * entire list, we now have an accurate updated value for
         * largest_free_size.
         */
        mem->largest_free_size = largest_size;
        return 0;
    }

    /* Remove from the freelist & return the excess memory to free. */
    *best_fit_prev = *(obj_header_t **)best_fit;
    trim_obj(mem, best_fit, aligned_size, (chunk_t *)0);

    /* Pre-init the block header; o_alone & o_type are already init'd. */
    best_fit[-1].o_size = size;

    return best_fit;
}

/* Allocate an object.  This handles all but the fastest, simplest case. */
private obj_header_t *
alloc_obj(gs_ref_memory_t *mem, ulong lsize, gs_memory_type_ptr_t pstype,
          alloc_flags_t flags, client_name_t cname)
{
    obj_header_t *ptr;

    if (lsize >= mem->large_size || (flags & ALLOC_IMMOVABLE)) {
        /*
         * Give the object a chunk all its own.  Note that this case does
         * not occur if is_controlled is true.
         */
        ulong asize =
            ((lsize + obj_align_mask) & -obj_align_mod) +
            sizeof(obj_header_t);
        chunk_t *cp =
            alloc_acquire_chunk(mem, asize + sizeof(chunk_head_t), false,
                                "large object chunk");

        if (
#if arch_sizeof_long > arch_sizeof_int
               asize > max_uint
#else
               asize < lsize
#endif
            )
            return 0;
        if (cp == 0)
            return 0;
        ptr = (obj_header_t *) cp->cbot;
        cp->cbot += asize;
        ptr->o_alone = 1;
        ptr->o_size = lsize;
    } else if (lsize > max_freelist_size && (flags & ALLOC_DIRECT) &&
               (ptr = large_freelist_alloc(mem, lsize)) != 0) {
        /* We hadn't checked the large-block freelist yet. */
        --ptr;			/* must point to the header */
        goto done;
    } else {
        uint asize = obj_size_round((uint) lsize);
        bool consolidate = mem->is_controlled;
        bool allocate_success = true;

        while (mem->cc.ctop -
               (byte *) (ptr = (obj_header_t *) mem->cc.cbot)
               <= asize + sizeof(obj_header_t)) {
            if (consolidate) {
                /* Try consolidating free space. */
                gs_consolidate_free((gs_memory_t *)mem);
                consolidate = false;
                continue;
            } else {
                /* Add another chunk. */
                chunk_t *cp =
                    alloc_add_chunk(mem, (ulong)mem->chunk_size, "chunk");

                if (cp == 0) {
                    allocate_success = false;
                    break;
                }
            }
        }
        /*
         * If no success, try to scavenge from low free memory.  This is
         * only enabled for controlled memory (currently only the async
         * renderer) because it's too much work to prevent it from
         * examining outer save levels in the general case.
         */
        if (allocate_success)
            mem->cc.cbot = (byte *) ptr + asize;
        else if (!mem->is_controlled ||
                 (ptr = scavenge_low_free(mem, (uint)lsize)) == 0)
            return 0;		/* allocation failed */
        ptr->o_alone = 0;
        ptr->o_size = (uint) lsize;
    }
done:
    ptr->o_type = pstype;
    ptr++;
    gs_alloc_fill(ptr, gs_alloc_fill_alloc, lsize);
    return ptr;
}

/*
 * Consolidate free objects contiguous to free space at cbot onto the cbot
 * area.  Also keep track of the end of the highest internal free object
 * (int_freed_top).
 */
private void
consolidate_chunk_free(chunk_t *cp, gs_ref_memory_t *mem)
{
    obj_header_t *begin_free = 0;

    cp->int_freed_top = cp->cbase;	/* below all objects in chunk */
    SCAN_CHUNK_OBJECTS(cp)
        DO_ALL
        if (pre->o_type == &st_free) {
            if (begin_free == 0)
                begin_free = pre;
        } else {
            if (begin_free)
                cp->int_freed_top = (byte *)pre;  /* first byte following internal free */
            begin_free = 0;
        }
    END_OBJECTS_SCAN
    if (begin_free) {
        /* We found free objects at the top of the object area. */
        /* Remove the free objects from the freelists. */
        remove_range_from_freelist(mem, begin_free, cp->cbot);
        if_debug4('a', "[a]resetting chunk 0x%lx cbot from 0x%lx to 0x%lx (%lu free)\n",
                  (ulong) cp, (ulong) cp->cbot, (ulong) begin_free,
                  (ulong) ((byte *) cp->cbot - (byte *) begin_free));
        cp->cbot = (byte *) begin_free;
    }
}

/* Consolidate free objects. */
void
ialloc_consolidate_free(gs_ref_memory_t *mem)
{
    chunk_t *cp;
    chunk_t *cprev;

    alloc_close_chunk(mem);

    /* Visit chunks in reverse order to encourage LIFO behavior. */
    for (cp = mem->clast; cp != 0; cp = cprev) {
        cprev = cp->cprev;
        consolidate_chunk_free(cp, mem);
        if (cp->cbot == cp->cbase && cp->ctop == cp->climit) {
            /* The entire chunk is free. */
            chunk_t *cnext = cp->cnext;

            if (!mem->is_controlled) {
                alloc_free_chunk(cp, mem);
                if (mem->pcc == cp)
                    mem->pcc =
                        (cnext == 0 ? cprev : cprev == 0 ? cnext :
                         cprev->cbot - cprev->ctop >
                         cnext->cbot - cnext->ctop ? cprev :
                         cnext);
            }
        }
    }
    alloc_open_chunk(mem);
}
private void
i_consolidate_free(gs_memory_t *mem)
{
    ialloc_consolidate_free((gs_ref_memory_t *)mem);
}

/* Try to free up the given amount of space from free space below the chunk base. */
private obj_header_t *	/* returns an uninitialized object hdr, NULL if none found */
scavenge_low_free(gs_ref_memory_t *mem, unsigned request_size)
{
    /* Find the first range of memory that can be glued back together to fill the request. */
    obj_header_t *found_pre = 0;

    /* Visit chunks in forward order. */
    obj_header_t *begin_free = 0;
    uint found_free;
    uint request_size_rounded = obj_size_round(request_size);
    uint need_free = request_size_rounded + sizeof(obj_header_t);	/* room for GC's dummy hdr */
    chunk_t *cp;

    for (cp = mem->cfirst; cp != 0; cp = cp->cnext) {
        begin_free = 0;
        found_free = 0;
        SCAN_CHUNK_OBJECTS(cp)
            DO_ALL
            if (pre->o_type == &st_free) {
                if (begin_free == 0) {
                    found_free = 0;
                    begin_free = pre;
                }
                found_free += pre_obj_rounded_size(pre);
                if (begin_free != 0 && found_free >= need_free)
                    break;
            } else
                begin_free = 0;
        END_OBJECTS_SCAN_INCOMPLETE

        /* Found a sufficient range of empty memory. */
        if (begin_free != 0 && found_free >= need_free) {

            /* Fish the found pieces out of the various freelists. */
            remove_range_from_freelist(mem, (char *)begin_free,
                                       (char *)begin_free + found_free);

            /* Prepare the found object. */
            found_pre = begin_free;
            found_pre->o_type = &st_free;  /* don't confuse GC if it gets lost */
            found_pre->o_size = found_free - sizeof(obj_header_t);

            /* Chop off the excess tail piece & toss it back into the free pool. */
            trim_obj(mem, found_pre + 1, request_size, cp);
        }
    }
    return found_pre;
}

/* Remove a range of memory from a mem's freelists. */
private void
remove_range_from_freelist(gs_ref_memory_t *mem, void *bottom, void *top)
{
    int num_free[num_freelists];
    int smallest = num_freelists, largest = -1;
    const obj_header_t *cur;
    uint size;
    int i;
    uint removed = 0;

    /*
     * Scan from bottom to top, a range containing only free objects,
     * counting the number of objects of each size.
     */

    for (cur = bottom; cur != top;
         cur = (const obj_header_t *)
         ((const byte *)cur + obj_size_round(size))
        ) {
        size = cur->o_size;
        i = (size > max_freelist_size ? LARGE_FREELIST_INDEX :
             (size + obj_align_mask) >> log2_obj_align_mod);
        if (i < smallest) {
            /*
             * 0-length free blocks aren't kept on any list, because
             * they don't have room for a pointer.
             */
            if (i == 0)
                continue;
            if (smallest < num_freelists)
                memset(&num_free[i], 0, (smallest - i) * sizeof(int));
            else
                num_free[i] = 0;
            smallest = i;
        }
        if (i > largest) {
            if (largest >= 0)
                memset(&num_free[largest + 1], 0, (i - largest) * sizeof(int));
            largest = i;
        }
        num_free[i]++;
    }

    /*
     * Remove free objects from the freelists, adjusting lost.objects by
     * subtracting the size of the region being processed minus the amount
     * of space reclaimed.
     */

    for (i = smallest; i <= largest; i++) {
        int count = num_free[i];
        obj_header_t *pfree;
        obj_header_t **ppfprev;

        if (!count)
            continue;
        ppfprev = &mem->freelists[i];
        for (;;) {
            pfree = *ppfprev;
            if (PTR_GE(pfree, bottom) && PTR_LT(pfree, top)) {
                /* We're removing an object. */
                *ppfprev = *(obj_header_t **) pfree;
                removed += obj_align_round(pfree[-1].o_size);
                if (!--count)
                    break;
            } else
                ppfprev = (obj_header_t **) pfree;
        }
    }
    mem->lost.objects -= (char *)top - (char *)bottom - removed;
}

/* Trim a memory object down to a given size. */
private void
trim_obj(gs_ref_memory_t *mem, obj_header_t *obj, uint size, chunk_t *cp)
/* The object must have rounded size == required size, or have enough */
/* room for a trailing dummy obj_header. */
{
    uint rounded_size = obj_align_round(size);
    obj_header_t *pre_obj = obj - 1;
    obj_header_t *excess_pre = (obj_header_t *)((char *)obj + rounded_size);
    uint old_rounded_size = obj_align_round(pre_obj->o_size);
    uint excess_size = old_rounded_size - rounded_size - sizeof(obj_header_t);

    /* Trim the object's size to the desired size. */
    pre_obj->o_size = size;
    if (old_rounded_size == rounded_size)
        return;			/* nothing more to do here */
    /*
     * If the object is alone in its chunk, move cbot to point to the end
     * of the object.
     */
    if (pre_obj->o_alone) {
        if (!cp) {
            mem->cfreed.memory = mem;
            if (chunk_locate(obj, &mem->cfreed)) {
                cp = mem->cfreed.cp;
            }
        }
        if (cp) {
#ifdef DEBUG
            if (cp->cbot != (byte *)obj + old_rounded_size) {
                lprintf3("resizing 0x%lx, old size %u, new size %u, cbot wrong!\n",
                         (ulong)obj, old_rounded_size, size);
                /* gs_abort */
            } else
#endif
            {
                cp->cbot = (byte *)excess_pre;
                return;
            }
        }
        /*
         * Something very weird is going on.  This probably shouldn't
         * ever happen, but if it does....
         */
        pre_obj->o_alone = 0;
    }
    /* Make the excess into a free object. */
    excess_pre->o_type = &st_free;	/* don't confuse GC */
    excess_pre->o_size = excess_size;
    excess_pre->o_alone = 0;
    if (excess_size >= obj_align_mod) {
        /* Put the excess object on a freelist. */
        obj_header_t **pfl;

        if ((byte *)excess_pre >= mem->cc.int_freed_top)
            mem->cc.int_freed_top = (byte *)excess_pre + excess_size;
        if (excess_size <= max_freelist_size)
            pfl = &mem->freelists[(excess_size + obj_align_mask) >>
                                  log2_obj_align_mod];
        else {
            uint rounded_size = obj_align_round(excess_size);

            pfl = &mem->freelists[LARGE_FREELIST_INDEX];
            if (rounded_size > mem->largest_free_size)
                mem->largest_free_size = rounded_size;
        }
        *(obj_header_t **) (excess_pre + 1) = *pfl;
        *pfl = excess_pre + 1;
        mem->cfreed.memory = mem;
    } else {
        /* The excess piece will be "lost" memory. */
        mem->lost.objects += excess_size + sizeof(obj_header_t);
    }
}
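
/*
 * Worked example (made-up sizes, assuming obj_align_mod = 8): trimming
 * an object whose current rounded size is 64 down to size = 20 gives
 * rounded_size = 24, leaving excess_size = 64 - 24 - sizeof(obj_header_t)
 * bytes behind a dummy header at obj + 24 bytes; that excess block is
 * pushed onto the matching freelist (or counted as lost if it is too
 * small to hold a freelist pointer).
 */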

/* ================ Roots ================ */

/* Register a root. */
private int
i_register_root(gs_memory_t * mem, gs_gc_root_t * rp, gs_ptr_type_t ptype,
                void **up, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;

    if (rp == NULL) {
        rp = gs_raw_alloc_struct_immovable(imem->parent, &st_gc_root_t,
                                           "i_register_root");
        if (rp == 0)
            return_error(gs_error_VMerror);
        rp->free_on_unregister = true;
    } else
        rp->free_on_unregister = false;
    if_debug3('8', "[8]register root(%s) 0x%lx -> 0x%lx\n",
              client_name_string(cname), (ulong)rp, (ulong)up);
    rp->ptype = ptype;
    rp->p = up;
    rp->next = imem->roots;
    imem->roots = rp;
    return 0;
}
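
/*
 * Usage sketch (illustrative; 'my_root' and 'my_obj' are assumptions,
 * not names used elsewhere): keep a GC-traced pointer alive across
 * collections by registering its location as a root, dispatching
 * through the procs table defined above.
 */
#if 0
private void
example_register_root(gs_memory_t *mem)
{
    static gs_gc_root_t my_root;
    static void *my_obj;	/* assumed to point to a GC-visible struct */

    /* ptr_struct_type tells the GC to trace *(&my_obj) as a struct. */
    (*mem->procs.register_root)(mem, &my_root, ptr_struct_type,
                                &my_obj, "example root");
    /* ... use my_obj across possible GCs ... */
    (*mem->procs.unregister_root)(mem, &my_root, "example root");
}
#endif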

/* Unregister a root. */
private void
i_unregister_root(gs_memory_t * mem, gs_gc_root_t * rp, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    gs_gc_root_t **rpp = &imem->roots;

    if_debug2('8', "[8]unregister root(%s) 0x%lx\n",
              client_name_string(cname), (ulong) rp);
    while (*rpp != rp)
        rpp = &(*rpp)->next;
    *rpp = (*rpp)->next;
    if (rp->free_on_unregister)
        gs_free_object(imem->parent, rp, "i_unregister_root");
}

/* ================ Chunks ================ */

public_st_chunk();

/* Insert a chunk in the chain.  This is exported for the GC and for */
/* the forget_save operation. */
void
alloc_link_chunk(chunk_t * cp, gs_ref_memory_t * imem)
{
    byte *cdata = cp->cbase;
    chunk_t *icp;
    chunk_t *prev;

    /*
     * Allocators tend to allocate in either ascending or descending
     * address order.  The loop will handle the latter well; check for
     * the former first.
     */
    if (imem->clast && PTR_GE(cdata, imem->clast->ctop))
        icp = 0;
    else
        for (icp = imem->cfirst; icp != 0 && PTR_GE(cdata, icp->ctop);
             icp = icp->cnext
            );
    cp->cnext = icp;
    if (icp == 0) {		/* add at end of chain */
        prev = imem->clast;
        imem->clast = cp;
    } else {			/* insert before icp */
        prev = icp->cprev;
        icp->cprev = cp;
    }
    cp->cprev = prev;
    if (prev == 0)
        imem->cfirst = cp;
    else
        prev->cnext = cp;
    if (imem->pcc != 0) {
        imem->cc.cnext = imem->pcc->cnext;
        imem->cc.cprev = imem->pcc->cprev;
    }
}

/* Add a chunk for ordinary allocation. */
private chunk_t *
alloc_add_chunk(gs_ref_memory_t * mem, ulong csize, client_name_t cname)
{
    chunk_t *cp = alloc_acquire_chunk(mem, csize, true, cname);

    if (cp) {
        alloc_close_chunk(mem);
        mem->pcc = cp;
        mem->cc = *mem->pcc;
        gs_alloc_fill(mem->cc.cbase, gs_alloc_fill_free,
                      mem->cc.climit - mem->cc.cbase);
    }
    return cp;
}

/* Acquire a chunk.  If we would exceed MaxLocalVM (if relevant), */
/* or if we would exceed the VMThreshold and psignal is NULL, */
/* return 0; if we would exceed the VMThreshold but psignal is valid, */
/* just set the signal and return successfully. */
private chunk_t *
alloc_acquire_chunk(gs_ref_memory_t * mem, ulong csize, bool has_strings,
                    client_name_t cname)
{
    gs_raw_memory_t *parent = mem->parent;
    chunk_t *cp;
    byte *cdata;

#if arch_sizeof_long > arch_sizeof_int
    /* If csize is larger than max_uint, punt. */
    if (csize != (uint) csize)
        return 0;
#endif
    cp = gs_raw_alloc_struct_immovable(parent, &st_chunk, cname);
    if ((ulong) (mem->allocated + mem->inherited) >= mem->limit) {
        mem->gc_status.requested += csize;
        if (mem->limit >= mem->gc_status.max_vm ||
            mem->gc_status.psignal == 0
            )
            return 0;
        if_debug4('0', "[0]signaling space=%d, allocated=%ld, limit=%ld, requested=%ld\n",
                  mem->space, (long)mem->allocated,
                  (long)mem->limit, (long)mem->gc_status.requested);
        *mem->gc_status.psignal = mem->gc_status.signal_value;
    }
    cdata = gs_alloc_bytes_immovable(parent, csize, cname);
    if (cp == 0 || cdata == 0) {
        gs_free_object(parent, cdata, cname);
        gs_free_object(parent, cp, cname);
        mem->gc_status.requested = csize;
        return 0;
    }
    alloc_init_chunk(cp, cdata, cdata + csize, has_strings, (chunk_t *) 0);
    alloc_link_chunk(cp, mem);
    mem->allocated += st_chunk.ssize + csize;
    return cp;
}

/* Initialize the pointers in a chunk.  This is exported for save/restore. */
/* The bottom pointer must be aligned, but the top pointer need not */
/* be aligned. */
void
alloc_init_chunk(chunk_t * cp, byte * bot, byte * top, bool has_strings,
                 chunk_t * outer)
{
    byte *cdata = bot;

    if (outer != 0)
        outer->inner_count++;
    cp->chead = (chunk_head_t *) cdata;
    cdata += sizeof(chunk_head_t);
    cp->cbot = cp->cbase = cp->int_freed_top = cdata;
    cp->cend = top;
    cp->rcur = 0;
    cp->rtop = 0;
    cp->outer = outer;
    cp->inner_count = 0;
    cp->has_refs = false;
    cp->sbase = cdata;
    if (has_strings && top - cdata >= string_space_quantum + sizeof(long) - 1) {
        /*
         * We allocate a large enough string marking and reloc table
         * to cover the entire chunk.
         */
        uint nquanta = string_space_quanta(top - cdata);

        cp->climit = cdata + nquanta * string_data_quantum;
        cp->smark = cp->climit;
        cp->smark_size = string_quanta_mark_size(nquanta);
        cp->sreloc =
            (string_reloc_offset *) (cp->smark + cp->smark_size);
        cp->sfree1 = (uint *) cp->sreloc;
    } else {
        /* No strings, don't need the string GC tables. */
        cp->climit = cp->cend;
        cp->sfree1 = 0;
        cp->smark = 0;
        cp->smark_size = 0;
        cp->sreloc = 0;
    }
    cp->ctop = cp->climit;
    alloc_init_free_strings(cp);
}
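
/*
 * A sketch of the resulting chunk layout (with strings; the arrows show
 * the directions in which the pointers move as allocation proceeds):
 *
 *   bot                                                           top
 *    | chunk_head_t | objects ->    ...    <- strings | smark | sreloc |
 *    ^chead         ^cbase/cbot          ctop^        ^climit         ^cend
 *
 * Objects grow upward from cbase via cbot; strings grow downward from
 * climit via ctop; the string mark/reloc tables live above climit.
 */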
1591
1592 /* Initialize the string freelists in a chunk. */
1593 void
alloc_init_free_strings(chunk_t * cp)1594 alloc_init_free_strings(chunk_t * cp)
1595 {
1596 if (cp->sfree1)
1597 memset(cp->sfree1, 0, STRING_FREELIST_SPACE(cp));
1598 cp->sfree = 0;
1599 }
1600
1601 /* Close up the current chunk. */
1602 /* This is exported for save/restore and the GC. */
1603 void
alloc_close_chunk(gs_ref_memory_t * mem)1604 alloc_close_chunk(gs_ref_memory_t * mem)
1605 {
1606 if (mem->pcc != 0) {
1607 *mem->pcc = mem->cc;
1608 #ifdef DEBUG
1609 if (gs_debug_c('a')) {
1610 dlprintf1("[a%d]", alloc_trace_space(mem));
1611 dprintf_chunk("closing chunk", mem->pcc);
1612 }
1613 #endif
1614 }
1615 }
1616
1617 /* Reopen the current chunk after a GC or restore. */
1618 void
alloc_open_chunk(gs_ref_memory_t * mem)1619 alloc_open_chunk(gs_ref_memory_t * mem)
1620 {
1621 if (mem->pcc != 0) {
1622 mem->cc = *mem->pcc;
1623 #ifdef DEBUG
1624 if (gs_debug_c('a')) {
1625 dlprintf1("[a%d]", alloc_trace_space(mem));
1626 dprintf_chunk("opening chunk", mem->pcc);
1627 }
1628 #endif
1629 }
1630 }

/* Remove a chunk from the chain. This is exported for the GC. */
void
alloc_unlink_chunk(chunk_t * cp, gs_ref_memory_t * mem)
{
#ifdef DEBUG
    if (gs_alloc_debug) {
        /* Check that this chunk belongs to this allocator. */
        const chunk_t *ap = mem->cfirst;

        while (ap != 0 && ap != cp)
            ap = ap->cnext;
        if (ap != cp) {
            lprintf2("unlink_chunk 0x%lx not owned by memory 0x%lx!\n",
                     (ulong) cp, (ulong) mem);
            return;             /*gs_abort(); */
        }
    }
#endif
    if (cp->cprev == 0)
        mem->cfirst = cp->cnext;
    else
        cp->cprev->cnext = cp->cnext;
    if (cp->cnext == 0)
        mem->clast = cp->cprev;
    else
        cp->cnext->cprev = cp->cprev;
    if (mem->pcc != 0) {
        mem->cc.cnext = mem->pcc->cnext;
        mem->cc.cprev = mem->pcc->cprev;
        if (mem->pcc == cp) {
            mem->pcc = 0;
            mem->cc.cbot = mem->cc.ctop = 0;
        }
    }
}

/*
 * Free a chunk. This is exported for the GC. Since we eventually use
 * this to free the chunk containing the allocator itself, we must be
 * careful not to reference anything in the allocator after freeing the
 * chunk data.
 */
void
alloc_free_chunk(chunk_t * cp, gs_ref_memory_t * mem)
{
    gs_raw_memory_t *parent = mem->parent;
    byte *cdata = (byte *)cp->chead;
    ulong csize = (byte *)cp->cend - cdata;

    alloc_unlink_chunk(cp, mem);
    mem->allocated -= st_chunk.ssize;
    if (mem->cfreed.cp == cp)
        mem->cfreed.cp = 0;
    if (cp->outer == 0) {
        mem->allocated -= csize;
        gs_free_object(parent, cdata, "alloc_free_chunk(data)");
    } else {
        cp->outer->inner_count--;
        gs_alloc_fill(cdata, gs_alloc_fill_free, csize);
    }
    gs_free_object(parent, cp, "alloc_free_chunk(chunk struct)");
}
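
/*
 * Editorial sketch (not part of the original source): alloc_free_chunk
 * unlinks the chunk before freeing it, so a caller releasing every chunk of
 * an allocator must fetch the cnext link before each call.  A minimal,
 * hypothetical loop (assuming no inner chunks and that no chunk contains
 * the allocator itself):
 */
#if 0
private void
example_free_all_chunks(gs_ref_memory_t * mem)
{
    chunk_t *cp, *cnext;

    for (cp = mem->cfirst; cp != 0; cp = cnext) {
        cnext = cp->cnext;      /* cp is freed below, so save the link now */
        alloc_free_chunk(cp, mem);
    }
}
#endif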

/* Find the chunk for a pointer. */
/* Note that this only searches the current save level. */
/* Since a given save level can't contain both a chunk and an inner chunk */
/* of that chunk, we can stop as soon as the pointer falls within a chunk, */
/* and then just test ptr_is_in_inner_chunk. */
bool
chunk_locate_ptr(const void *ptr, chunk_locator_t * clp)
{
    register chunk_t *cp = clp->cp;

    if (cp == 0) {
        cp = clp->memory->cfirst;
        if (cp == 0)
            return false;
        /* ptr is in the last chunk often enough to be worth checking for. */
        if (PTR_GE(ptr, clp->memory->clast->cbase))
            cp = clp->memory->clast;
    }
    if (PTR_LT(ptr, cp->cbase)) {
        do {
            cp = cp->cprev;
            if (cp == 0)
                return false;
        }
        while (PTR_LT(ptr, cp->cbase));
        if (PTR_GE(ptr, cp->cend))
            return false;
    } else {
        while (PTR_GE(ptr, cp->cend)) {
            cp = cp->cnext;
            if (cp == 0)
                return false;
        }
        if (PTR_LT(ptr, cp->cbase))
            return false;
    }
    clp->cp = cp;
    return !ptr_is_in_inner_chunk(ptr, cp);
}
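
/*
 * Editorial sketch (not part of the original source): a chunk_locator_t
 * carries the allocator to search plus a cached chunk pointer (clp->cp);
 * starting with a null cache and reusing the locator across calls lets
 * nearby pointers be found quickly.  A hypothetical one-shot lookup:
 */
#if 0
private bool
example_locate(gs_ref_memory_t * mem, const void *ptr)
{
    chunk_locator_t loc;

    loc.memory = mem;
    loc.cp = 0;                 /* no cached chunk: search from scratch */
    if (!chunk_locate_ptr(ptr, &loc))
        return false;           /* not in any chunk at this save level */
    return true;                /* loc.cp now points to the containing chunk */
}
#endif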

/* ------ Debugging ------ */

#ifdef DEBUG

#include "string_.h"

inline private bool
obj_in_control_region(const void *obot, const void *otop,
                      const dump_control_t *pdc)
{
    return
        ((pdc->bottom == NULL || PTR_GT(otop, pdc->bottom)) &&
         (pdc->top == NULL || PTR_LT(obot, pdc->top)));
}

const dump_control_t dump_control_default =
{
    dump_do_default, NULL, NULL
};
const dump_control_t dump_control_all =
{
    dump_do_strings | dump_do_type_addresses | dump_do_pointers |
        dump_do_pointed_strings | dump_do_contents, NULL, NULL
};
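
/*
 * Editorial sketch (not part of the original source): the bottom and top
 * members of dump_control_t clip a dump to an address window, as tested by
 * obj_in_control_region above.  A hypothetical control that dumps only
 * objects overlapping a given range (debug_dump_memory is defined below):
 */
#if 0
private void
example_dump_window(const gs_ref_memory_t * mem, byte * lo, byte * hi)
{
    dump_control_t control;

    control = dump_control_all;
    control.bottom = lo;        /* objects ending at or below lo are skipped */
    control.top = hi;           /* objects starting at or above hi are skipped */
    debug_dump_memory(mem, &control);
}
#endif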

/*
 * Internal procedure to dump a block of memory, in hex and optionally
 * also as characters.
 */
private void
debug_indent(int indent)
{
    int i;

    for (i = indent; i > 0; --i)
        dputc(' ');
}
private void
debug_dump_contents(const byte * bot, const byte * top, int indent,
                    bool as_chars)
{
    const byte *block;

#define block_size 16

    if (bot >= top)
        return;
    for (block = bot - ((bot - (byte *) 0) & (block_size - 1));
         block < top; block += block_size
        ) {
        int i;
        char label[12];

        /* Check for repeated blocks. */
        if (block >= bot + block_size &&
            block <= top - (block_size * 2) &&
            !memcmp(block, block - block_size, block_size) &&
            !memcmp(block, block + block_size, block_size)
            ) {
            if (block < bot + block_size * 2 ||
                memcmp(block, block - block_size * 2, block_size)
                ) {
                debug_indent(indent);
                dputs(" ...\n");
            }
            continue;
        }
        sprintf(label, "0x%lx:", (ulong) block);
        debug_indent(indent);
        dputs(label);
        for (i = 0; i < block_size; ++i) {
            /* Two spaces between groups of 4 bytes, one space otherwise. */
            const char *sepr = ((i & 3) == 0 && i != 0 ? "  " : " ");

            dputs(sepr);
            if (block + i >= bot && block + i < top)
                dprintf1("%02x", block[i]);
            else
                dputs("  ");    /* two spaces keep the hex columns aligned */
        }
        dputc('\n');
        if (as_chars) {
            debug_indent(indent + strlen(label));
            for (i = 0; i < block_size; ++i) {
                byte ch;

                if ((i & 3) == 0 && i != 0)
                    dputc(' ');
                if (block + i >= bot && block + i < top &&
                    (ch = block[i]) >= 32 && ch <= 126
                    )
                    dprintf1("  %c", ch);
                else
                    dputs("   ");       /* three columns per byte, as above */
            }
            dputc('\n');
        }
    }
#undef block_size
}
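
/*
 * Editorial sketch (not part of the original source): debug_dump_contents
 * works on any byte range, not just chunk data.  Dumping a short buffer in
 * hex with the character display enabled, indented two columns:
 */
#if 0
private void
example_dump_buffer(void)
{
    static const byte buf[] = "sample data for debug_dump_contents";

    debug_dump_contents(buf, buf + sizeof(buf) - 1, 2, true);
}
#endif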

/* Print one object with the given options. */
/* Relevant options: type_addresses, no_types, pointers, pointed_strings, */
/* contents. */
void
debug_print_object(const void *obj, const dump_control_t * control)
{
    const obj_header_t *pre = ((const obj_header_t *)obj) - 1;
    ulong size = pre_obj_contents_size(pre);
    const gs_memory_struct_type_t *type = pre->o_type;
    dump_options_t options = control->options;

    dprintf3(" pre=0x%lx(obj=0x%lx) size=%lu", (ulong) pre, (ulong) obj,
             size);
    switch (options & (dump_do_type_addresses | dump_do_no_types)) {
        case dump_do_type_addresses + dump_do_no_types: /* addresses only */
            dprintf1(" type=0x%lx", (ulong) type);
            break;
        case dump_do_type_addresses:    /* addresses & names */
            dprintf2(" type=%s(0x%lx)", struct_type_name_string(type),
                     (ulong) type);
            break;
        case 0:                 /* names only */
            dprintf1(" type=%s", struct_type_name_string(type));
            /* falls through */
        case dump_do_no_types:  /* nothing */
            ;
    }
    if (options & dump_do_marks) {
        dprintf2(" smark/back=%u (0x%x)", pre->o_smark, pre->o_smark);
    }
    dputc('\n');
    if (type == &st_free)
        return;
    if (options & dump_do_pointers) {
        struct_proc_enum_ptrs((*proc)) = type->enum_ptrs;
        uint index = 0;
        enum_ptr_t eptr;
        gs_ptr_type_t ptype;

        if (proc != gs_no_struct_enum_ptrs)
            for (; (ptype = (*proc)(pre + 1, size, index, &eptr, type, NULL)) != 0;
                 ++index
                ) {
                const void *ptr = eptr.ptr;

                dprintf1(" ptr %u: ", index);
                if (ptype == ptr_string_type || ptype == ptr_const_string_type) {
                    const gs_const_string *str = (const gs_const_string *)ptr;

                    dprintf2("0x%lx(%u)", (ulong) str->data, str->size);
                    if (options & dump_do_pointed_strings) {
                        dputs(" =>\n");
                        debug_dump_contents(str->data, str->data + str->size, 6,
                                            true);
                    } else {
                        dputc('\n');
                    }
                } else {
                    dprintf1((PTR_BETWEEN(ptr, obj, (const byte *)obj + size) ?
                              "(0x%lx)\n" : "0x%lx\n"), (ulong) ptr);
                }
            }
    }
    if (options & dump_do_contents) {
        debug_dump_contents((const byte *)obj, (const byte *)obj + size,
                            0, false);
    }
}
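
/*
 * Editorial sketch (not part of the original source): debug_print_object
 * takes a pointer to the object contents, i.e. just past the obj_header_t,
 * which is why the chunk scans in this file pass pre + 1.  A hypothetical
 * direct call with the fullest predefined options:
 */
#if 0
private void
example_print_object(const void *obj)
{
    debug_print_object(obj, &dump_control_all); /* types, pointers, contents */
}
#endif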

/* Print the contents of a chunk with the given options. */
/* Relevant options: all. */
void
debug_dump_chunk(const chunk_t * cp, const dump_control_t * control)
{
    dprintf1("chunk at 0x%lx:\n", (ulong) cp);
    dprintf3(" chead=0x%lx cbase=0x%lx sbase=0x%lx\n",
             (ulong) cp->chead, (ulong) cp->cbase, (ulong) cp->sbase);
    dprintf3(" rcur=0x%lx rtop=0x%lx cbot=0x%lx\n",
             (ulong) cp->rcur, (ulong) cp->rtop, (ulong) cp->cbot);
    dprintf4(" ctop=0x%lx climit=0x%lx smark=0x%lx, size=%u\n",
             (ulong) cp->ctop, (ulong) cp->climit, (ulong) cp->smark,
             cp->smark_size);
    dprintf2(" sreloc=0x%lx cend=0x%lx\n",
             (ulong) cp->sreloc, (ulong) cp->cend);
    dprintf5(" cprev=0x%lx cnext=0x%lx outer=0x%lx inner_count=%u has_refs=%s\n",
             (ulong) cp->cprev, (ulong) cp->cnext, (ulong) cp->outer,
             cp->inner_count, (cp->has_refs ? "true" : "false"));
    dprintf2(" sfree1=0x%lx sfree=0x%x\n",
             (ulong) cp->sfree1, cp->sfree);
    if (control->options & dump_do_strings) {
        debug_dump_contents((control->bottom == 0 ? cp->ctop :
                             max(control->bottom, cp->ctop)),
                            (control->top == 0 ? cp->climit :
                             min(control->top, cp->climit)),
                            0, true);
    }
    SCAN_CHUNK_OBJECTS(cp)
        DO_ALL
        if (obj_in_control_region(pre + 1,
                                  (const byte *)(pre + 1) + size,
                                  control)
            )
            debug_print_object(pre + 1, control);
    /* Temporarily redefine gs_exit so a chunk parsing error */
    /* won't actually exit. */
#define gs_exit(n) DO_NOTHING
    END_OBJECTS_SCAN
#undef gs_exit
}
void
debug_print_chunk(const chunk_t * cp)
{
    dump_control_t control;

    control = dump_control_default;
    debug_dump_chunk(cp, &control);
}

/* Print the contents of all chunks managed by an allocator. */
/* Relevant options: all. */
void
debug_dump_memory(const gs_ref_memory_t * mem, const dump_control_t * control)
{
    const chunk_t *mcp;

    for (mcp = mem->cfirst; mcp != 0; mcp = mcp->cnext) {
        const chunk_t *cp = (mcp == mem->pcc ? &mem->cc : mcp);

        if (obj_in_control_region(cp->cbase, cp->cend, control))
            debug_dump_chunk(cp, control);
    }
}

/* Find all the objects that contain a given pointer. */
void
debug_find_pointers(const gs_ref_memory_t *mem, const void *target)
{
    dump_control_t control;
    const chunk_t *mcp;

    control.options = 0;
    for (mcp = mem->cfirst; mcp != 0; mcp = mcp->cnext) {
        const chunk_t *cp = (mcp == mem->pcc ? &mem->cc : mcp);

        SCAN_CHUNK_OBJECTS(cp)
            DO_ALL
            struct_proc_enum_ptrs((*proc)) = pre->o_type->enum_ptrs;
            uint index = 0;
            enum_ptr_t eptr;

            if (proc)           /* doesn't trace refs */
                for (; (*proc)(pre + 1, size, index, &eptr, pre->o_type, NULL);
                     ++index)
                    if (eptr.ptr == target) {
                        dprintf1("Index %d in", index);
                        debug_print_object(pre + 1, &control);
                    }
        /* Temporarily redefine gs_exit so a chunk parsing error */
        /* won't actually exit. */
#define gs_exit(n) DO_NOTHING
        END_OBJECTS_SCAN
#undef gs_exit
    }
}
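
/*
 * Editorial sketch (not part of the original source): debug_find_pointers
 * is typically invoked by hand from a debugger to hunt for the owners of a
 * suspect pointer:
 */
#if 0
private void
example_find_owners(const gs_ref_memory_t * mem, const void *suspect)
{
    debug_find_pointers(mem, suspect);  /* prints each object pointing at suspect */
}
#endif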

#endif /* DEBUG */