1 /* Copyright (C) 2001-2006 Artifex Software, Inc.
2 All Rights Reserved.
3
4 This software is provided AS-IS with no warranty, either express or
5 implied.
6
7 This software is distributed under license and may not be copied, modified
8 or distributed except as expressly authorized under the terms of that
9 license. Refer to licensing information at http://www.artifex.com/
10 or contact Artifex Software, Inc., 7 Mt. Lassen Drive - Suite A-134,
11 San Rafael, CA 94903, U.S.A., +1(415)492-9861, for further information.
12 */
13
14 /* $Id: gsalloc.c 10411 2009-11-30 20:34:48Z henrys $ */
15 /* Standard memory allocator */
16 #include "gx.h"
17 #include "memory_.h"
18 #include "gserrors.h"
19 #include "gsexit.h"
20 #include "gsmdebug.h"
21 #include "gsstruct.h"
22 #include "gxalloc.h"
23 #include "stream.h" /* for clearing stream list */
24
25 /*
26 * Define whether to try consolidating space before adding a new chunk.
27 * The default is not to do this, because it is computationally
28 * expensive and doesn't seem to help much. However, this is done for
29 * "controlled" spaces whether or not the #define is in effect.
30 */
31 /*#define CONSOLIDATE_BEFORE_ADDING_CHUNK */
32
33
34 /*
35 * This allocator produces tracing messages of the form
36 * [aNMOTS]...
37 * where
38 * N is the VM space number, +1 if we are allocating from stable memory.
39 * M is : for movable objects, | for immovable,
40 * O is {alloc = +, free = -, grow = >, shrink = <},
41 * T is {bytes = b, object = <, ref = $, string = >}, and
42 * S is {small freelist = f, large freelist = F, LIFO = space,
43 * own chunk = L, lost = #, lost own chunk = ~, other = .}.
44 */
#ifdef DEBUG
/* Return the VM space number for trace messages: the allocator's
 * space, +1 if the allocator is its own stable memory (i.e. we are
 * allocating from stable memory). */
static int
alloc_trace_space(const gs_ref_memory_t *imem)
{
    return imem->space + (imem->stable_memory == (const gs_memory_t *)imem);
}
/* Print one allocation-trace line (see the [aNMOTS] key above).
 * chars carries the M/O/S trace characters; stype may be 0 for
 * byte (non-struct) objects, in which case no type name is printed. */
static void
alloc_trace(const char *chars, gs_ref_memory_t * imem, client_name_t cname,
            gs_memory_type_ptr_t stype, uint size, const void *ptr)
{
    if_debug7('A', "[a%d%s]%s %s(%u) %s0x%lx\n",
              alloc_trace_space(imem), chars, client_name_string(cname),
              (ptr == 0 || stype == 0 ? "" :
               struct_type_name_string(stype)),
              size, (chars[1] == '+' ? "= " : ""), (ulong) ptr);
}
/* Sanity check: a plausible structure size is positive and < 1 MB.
 * Guards against corrupted or uninitialized type descriptors. */
static bool
alloc_size_is_ok(gs_memory_type_ptr_t stype)
{
    return (stype->ssize > 0 && stype->ssize < 0x100000);
}
/* Validate stype's size before allocating.  NOTE: on failure this
 * logs and makes the *enclosing* function return 0 (embedded return). */
# define ALLOC_CHECK_SIZE(stype)\
  BEGIN\
    if (!alloc_size_is_ok(stype)) {\
        lprintf2("size of struct type 0x%lx is 0x%lx!\n",\
                 (ulong)(stype), (ulong)((stype)->ssize));\
        return 0;\
    }\
  END
#else
# define alloc_trace(chars, imem, cname, stype, size, ptr) DO_NOTHING
# define ALLOC_CHECK_SIZE(stype) DO_NOTHING
#endif
78
79 /*
80 * The structure descriptor for allocators. Even though allocators
81 * are allocated outside GC space, they reference objects within it.
82 */
public_st_ref_memory();
/* Enumerate the GC-traceable pointers held by a gs_ref_memory_t:
 * streams, names_array, changes, saved, and scan_limit. */
static
ENUM_PTRS_BEGIN(ref_memory_enum_ptrs) return 0;
ENUM_PTR3(0, gs_ref_memory_t, streams, names_array, changes);
ENUM_PTR(3, gs_ref_memory_t, saved);
ENUM_PTR(4, gs_ref_memory_t, scan_limit);
ENUM_PTRS_END
/* Relocate those same pointers after a GC pass. */
static RELOC_PTRS_WITH(ref_memory_reloc_ptrs, gs_ref_memory_t *mptr)
{
    RELOC_PTR(gs_ref_memory_t, streams);
    RELOC_PTR(gs_ref_memory_t, names_array);
    RELOC_PTR(gs_ref_memory_t, changes);
    RELOC_PTR(gs_ref_memory_t, scan_limit);
    /* Don't relocate the saved pointer now -- see igc.c for details. */
    mptr->reloc_saved = RELOC_OBJ(mptr->saved);
}
RELOC_PTRS_END
100
101 /*
102 * Define the flags for alloc_obj, which implements all but the fastest
103 * case of allocation.
104 */
typedef enum {
    ALLOC_IMMOVABLE = 1,        /* allocate in a dedicated/immovable chunk */
    ALLOC_DIRECT = 2            /* called directly, without fast-case checks */
} alloc_flags_t;
109
110 /* Forward references */
111 static void remove_range_from_freelist(gs_ref_memory_t *mem, void* bottom, void* top);
112 static obj_header_t *large_freelist_alloc(gs_ref_memory_t *mem, uint size);
113 static obj_header_t *scavenge_low_free(gs_ref_memory_t *mem, unsigned request_size);
114 static ulong compute_free_objects(gs_ref_memory_t *);
115 static obj_header_t *alloc_obj(gs_ref_memory_t *, ulong, gs_memory_type_ptr_t, alloc_flags_t, client_name_t);
116 static void consolidate_chunk_free(chunk_t *cp, gs_ref_memory_t *mem);
117 static void trim_obj(gs_ref_memory_t *mem, obj_header_t *obj, uint size, chunk_t *cp);
118 static chunk_t *alloc_acquire_chunk(gs_ref_memory_t *, ulong, bool, client_name_t);
119 static chunk_t *alloc_add_chunk(gs_ref_memory_t *, ulong, client_name_t);
120 void alloc_close_chunk(gs_ref_memory_t *);
121
122 /*
123 * Define the standard implementation (with garbage collection)
124 * of Ghostscript's memory manager interface.
125 */
126 /* Raw memory procedures */
127 static gs_memory_proc_alloc_bytes(i_alloc_bytes_immovable);
128 static gs_memory_proc_resize_object(i_resize_object);
129 static gs_memory_proc_free_object(i_free_object);
130 static gs_memory_proc_stable(i_stable);
131 static gs_memory_proc_status(i_status);
132 static gs_memory_proc_free_all(i_free_all);
133 static gs_memory_proc_consolidate_free(i_consolidate_free);
134
135 /* Object memory procedures */
136 static gs_memory_proc_alloc_bytes(i_alloc_bytes);
137 static gs_memory_proc_alloc_struct(i_alloc_struct);
138 static gs_memory_proc_alloc_struct(i_alloc_struct_immovable);
139 static gs_memory_proc_alloc_byte_array(i_alloc_byte_array);
140 static gs_memory_proc_alloc_byte_array(i_alloc_byte_array_immovable);
141 static gs_memory_proc_alloc_struct_array(i_alloc_struct_array);
142 static gs_memory_proc_alloc_struct_array(i_alloc_struct_array_immovable);
143 static gs_memory_proc_object_size(i_object_size);
144 static gs_memory_proc_object_type(i_object_type);
145 static gs_memory_proc_alloc_string(i_alloc_string);
146 static gs_memory_proc_alloc_string(i_alloc_string_immovable);
147 static gs_memory_proc_resize_string(i_resize_string);
148 static gs_memory_proc_free_string(i_free_string);
149 static gs_memory_proc_register_root(i_register_root);
150 static gs_memory_proc_unregister_root(i_unregister_root);
151 static gs_memory_proc_enable_free(i_enable_free);
152
153 /* We export the procedures for subclasses. */
const gs_memory_procs_t gs_ref_memory_procs =
{
    /* Raw memory procedures */
    /* (positional initializer: order must match gs_memory_procs_t) */
    i_alloc_bytes_immovable,
    i_resize_object,
    i_free_object,
    i_stable,
    i_status,
    i_free_all,
    i_consolidate_free,
    /* Object memory procedures */
    i_alloc_bytes,
    i_alloc_struct,
    i_alloc_struct_immovable,
    i_alloc_byte_array,
    i_alloc_byte_array_immovable,
    i_alloc_struct_array,
    i_alloc_struct_array_immovable,
    i_object_size,
    i_object_type,
    i_alloc_string,
    i_alloc_string_immovable,
    i_resize_string,
    i_free_string,
    i_register_root,
    i_unregister_root,
    i_enable_free
};
182
183 /*
184 * Allocate and mostly initialize the state of an allocator (system, global,
185 * or local). Does not initialize global or space.
186 */
187 static void *ialloc_solo(gs_memory_t *, gs_memory_type_ptr_t,
188 chunk_t **);
gs_ref_memory_t *
ialloc_alloc_state(gs_memory_t * parent, uint chunk_size)
{
    chunk_t *cp;
    gs_ref_memory_t *iimem = ialloc_solo(parent, &st_ref_memory, &cp);

    if (iimem == 0)
        return 0;
    /* Until a separate stable allocator is attached, the allocator is
     * its own stable memory. */
    iimem->stable_memory = (gs_memory_t *)iimem;
    iimem->procs = gs_ref_memory_procs;
    iimem->gs_lib_ctx = parent->gs_lib_ctx;
    iimem->non_gc_memory = parent;
    iimem->chunk_size = chunk_size;
    /* Requests of at least large_size bytes are handled specially:
     * they skip the LIFO fast path (ELSEIF_LIFO_ALLOC) and large
     * strings get a chunk of their own (see i_alloc_string). */
    iimem->large_size = ((chunk_size / 4) & -obj_align_mod) + 1;
    iimem->is_controlled = false;
    iimem->gc_status.vm_threshold = chunk_size * 3L;
    iimem->gc_status.max_vm = 0x7fffffff;
    iimem->gc_status.psignal = NULL;
    iimem->gc_status.signal_value = 0;
    iimem->gc_status.enabled = false;
    iimem->gc_status.requested = 0;
    iimem->gc_allocated = 0;
    iimem->previous_status.allocated = 0;
    iimem->previous_status.used = 0;
    ialloc_reset(iimem);
    /* ialloc_reset cleared the chunk list; re-attach the solo chunk
     * that holds the allocator itself. */
    iimem->cfirst = iimem->clast = cp;
    ialloc_set_limit(iimem);
    iimem->cc.cbot = iimem->cc.ctop = 0;
    iimem->pcc = 0;
    iimem->save_level = 0;
    iimem->new_mask = 0;
    iimem->test_mask = ~0;
    iimem->streams = 0;
    iimem->names_array = 0;
    iimem->roots = 0;
    iimem->num_contexts = 0;
    iimem->saved = 0;
    return iimem;
}
228
229 /* Allocate a 'solo' object with its own chunk. */
230 static void *
ialloc_solo(gs_memory_t * parent,gs_memory_type_ptr_t pstype,chunk_t ** pcp)231 ialloc_solo(gs_memory_t * parent, gs_memory_type_ptr_t pstype,
232 chunk_t ** pcp)
233 { /*
234 * We can't assume that the parent uses the same object header
235 * that we do, but the GC requires that allocators have
236 * such a header. Therefore, we prepend one explicitly.
237 */
238 chunk_t *cp =
239 gs_raw_alloc_struct_immovable(parent, &st_chunk,
240 "ialloc_solo(chunk)");
241 uint csize =
242 ROUND_UP(sizeof(chunk_head_t) + sizeof(obj_header_t) +
243 pstype->ssize,
244 obj_align_mod);
245 byte *cdata = gs_alloc_bytes_immovable(parent, csize, "ialloc_solo");
246 obj_header_t *obj = (obj_header_t *) (cdata + sizeof(chunk_head_t));
247
248 if (cp == 0 || cdata == 0)
249 return 0;
250 alloc_init_chunk(cp, cdata, cdata + csize, false, (chunk_t *) NULL);
251 cp->cbot = cp->ctop;
252 cp->cprev = cp->cnext = 0;
253 /* Construct the object header "by hand". */
254 obj->o_alone = 1;
255 obj->o_size = pstype->ssize;
256 obj->o_type = pstype;
257 *pcp = cp;
258 return (void *)(obj + 1);
259 }
260
261 /*
262 * Add a chunk to an externally controlled allocator. Such allocators
263 * allocate all objects as immovable, are not garbage-collected, and
264 * don't attempt to acquire additional memory on their own.
265 */
int
ialloc_add_chunk(gs_ref_memory_t *imem, ulong space, client_name_t cname)
{
    chunk_t *cp;

    /* Temporarily allow acquisition, so alloc_add_chunk can succeed. */
    imem->is_controlled = false;
    imem->large_size = imem->chunk_size;
    imem->limit = max_long;
    imem->gc_status.max_vm = max_long;

    /* Acquire the chunk. */
    cp = alloc_add_chunk(imem, space, cname);

    /*
     * Make all allocations immovable.  Since the "movable" allocators
     * allocate within existing chunks, whereas the "immovable" ones
     * allocate in new chunks, we equate the latter to the former, even
     * though this seems backwards.
     */
    imem->procs.alloc_bytes_immovable = imem->procs.alloc_bytes;
    imem->procs.alloc_struct_immovable = imem->procs.alloc_struct;
    imem->procs.alloc_byte_array_immovable = imem->procs.alloc_byte_array;
    imem->procs.alloc_struct_array_immovable = imem->procs.alloc_struct_array;
    imem->procs.alloc_string_immovable = imem->procs.alloc_string;

    /* Disable acquisition of additional chunks.  (The zero limit
     * presumably makes further growth requests fail -- enforced in
     * alloc_obj / alloc_acquire_chunk, not visible here.) */
    imem->is_controlled = true;
    imem->limit = 0;

    return (cp ? 0 : gs_note_error(gs_error_VMerror));
}
298
299 /* Prepare for a GC by clearing the stream list. */
300 /* This probably belongs somewhere else.... */
301 void
ialloc_gc_prepare(gs_ref_memory_t * mem)302 ialloc_gc_prepare(gs_ref_memory_t * mem)
303 { /*
304 * We have to unlink every stream from its neighbors,
305 * so that referenced streams don't keep all streams around.
306 */
307 while (mem->streams != 0) {
308 stream *s = mem->streams;
309
310 mem->streams = s->next;
311 s->prev = s->next = 0;
312 }
313 }
314
315 /* Initialize after a save. */
void
ialloc_reset(gs_ref_memory_t * mem)
{
    /* Forget all chunks (the caller re-attaches any it still needs). */
    mem->cfirst = 0;
    mem->clast = 0;
    /* Clear the ref-packing state of the current chunk. */
    mem->cc.rcur = 0;
    mem->cc.rtop = 0;
    mem->cc.has_refs = false;
    /* Reset allocation and GC-scan accounting. */
    mem->allocated = 0;
    mem->changes = 0;
    mem->scan_limit = 0;
    mem->total_scanned = 0;
    mem->total_scanned_after_compacting = 0;
    ialloc_reset_free(mem);
}
331
332 /* Initialize after a save or GC. */
333 void
ialloc_reset_free(gs_ref_memory_t * mem)334 ialloc_reset_free(gs_ref_memory_t * mem)
335 {
336 int i;
337 obj_header_t **p;
338
339 mem->lost.objects = 0;
340 mem->lost.refs = 0;
341 mem->lost.strings = 0;
342 mem->cfreed.cp = 0;
343 for (i = 0, p = &mem->freelists[0]; i < num_freelists; i++, p++)
344 *p = 0;
345 mem->largest_free_size = 0;
346 }
347
348 /*
349 * Set an arbitrary limit so that the amount of allocated VM does not grow
350 * indefinitely even when GC is disabled. Benchmarks have shown that
351 * the resulting GC's are infrequent enough not to degrade performance
352 * significantly.
353 */
354 #define FORCE_GC_LIMIT 8000000
355
356 /* Set the allocation limit after a change in one or more of */
357 /* vm_threshold, max_vm, or enabled, or after a GC. */
void
ialloc_set_limit(register gs_ref_memory_t * mem)
{       /*
         * The following code is intended to set the limit so that
         * we stop allocating when allocated + previous_status.allocated
         * exceeds the lesser of max_vm or (if GC is enabled)
         * gc_allocated + vm_threshold.
         */
    /* Headroom left under max_vm, clamped at 0 to avoid ulong wrap. */
    ulong max_allocated =
        (mem->gc_status.max_vm > mem->previous_status.allocated ?
         mem->gc_status.max_vm - mem->previous_status.allocated :
         0);

    if (mem->gc_status.enabled) {
        ulong limit = mem->gc_allocated + mem->gc_status.vm_threshold;

        if (limit < mem->previous_status.allocated)
            mem->limit = 0;
        else {
            limit -= mem->previous_status.allocated;
            mem->limit = min(limit, max_allocated);
        }
    } else
        /* GC disabled: still cap growth so VM cannot grow without
         * bound (see FORCE_GC_LIMIT above). */
        mem->limit = min(max_allocated, mem->gc_allocated + FORCE_GC_LIMIT);
    if_debug7('0', "[0]space=%d, max_vm=%ld, prev.alloc=%ld, enabled=%d,\n\
      gc_alloc=%ld, threshold=%ld => limit=%ld\n",
              mem->space, (long)mem->gc_status.max_vm,
              (long)mem->previous_status.allocated,
              mem->gc_status.enabled, (long)mem->gc_allocated,
              (long)mem->gc_status.vm_threshold, (long)mem->limit);
}
389
390 /*
391 * Free all the memory owned by the allocator, except the allocator itself.
392 * Note that this only frees memory at the current save level: the client
393 * is responsible for restoring to the outermost level if desired.
394 */
395 static void
i_free_all(gs_memory_t * mem,uint free_mask,client_name_t cname)396 i_free_all(gs_memory_t * mem, uint free_mask, client_name_t cname)
397 {
398 gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
399 chunk_t *cp;
400
401 if (free_mask & FREE_ALL_DATA) {
402 chunk_t *csucc;
403
404 /*
405 * Free the chunks in reverse order, to encourage LIFO behavior.
406 * Don't free the chunk holding the allocator itself.
407 */
408 for (cp = imem->clast; cp != 0; cp = csucc) {
409 csucc = cp->cprev; /* save before freeing */
410 if (cp->cbase + sizeof(obj_header_t) != (byte *)mem)
411 alloc_free_chunk(cp, imem);
412 }
413 }
414 if (free_mask & FREE_ALL_ALLOCATOR) {
415 /* Free the chunk holding the allocator itself. */
416 for (cp = imem->clast; cp != 0; cp = cp->cprev)
417 if (cp->cbase + sizeof(obj_header_t) == (byte *)mem) {
418 alloc_free_chunk(cp, imem);
419 break;
420 }
421 }
422 }
423
424 /* ================ Accessors ================ */
425
426 /* Get the size of an object from the header. */
427 static uint
i_object_size(gs_memory_t * mem,const void * obj)428 i_object_size(gs_memory_t * mem, const void /*obj_header_t */ *obj)
429 {
430 return pre_obj_contents_size((const obj_header_t *)obj - 1);
431 }
432
433 /* Get the type of a structure from the header. */
434 static gs_memory_type_ptr_t
i_object_type(const gs_memory_t * mem,const void * obj)435 i_object_type(const gs_memory_t * mem, const void /*obj_header_t */ *obj)
436 {
437 return ((const obj_header_t *)obj - 1)->o_type;
438 }
439
440 /* Get the GC status of a memory. */
void
gs_memory_gc_status(const gs_ref_memory_t * mem, gs_memory_gc_status_t * pstat)
{
    /* Copy the whole status struct out by value. */
    *pstat = mem->gc_status;
}
446
447 /* Set the GC status of a memory. */
void
gs_memory_set_gc_status(gs_ref_memory_t * mem, const gs_memory_gc_status_t * pstat)
{
    mem->gc_status = *pstat;
    /* The allocation limit depends on vm_threshold / max_vm / enabled,
     * so recompute it. */
    ialloc_set_limit(mem);
}
454
455 /* Set VM threshold. */
456 void
gs_memory_set_vm_threshold(gs_ref_memory_t * mem,long val)457 gs_memory_set_vm_threshold(gs_ref_memory_t * mem, long val)
458 {
459 gs_memory_gc_status_t stat;
460 gs_ref_memory_t * stable = (gs_ref_memory_t *)mem->stable_memory;
461
462 gs_memory_gc_status(mem, &stat);
463 stat.vm_threshold = val;
464 gs_memory_set_gc_status(mem, &stat);
465 gs_memory_gc_status(stable, &stat);
466 stat.vm_threshold = val;
467 gs_memory_set_gc_status(stable, &stat);
468 }
469
470 /* Set VM reclaim. */
471 void
gs_memory_set_vm_reclaim(gs_ref_memory_t * mem,bool enabled)472 gs_memory_set_vm_reclaim(gs_ref_memory_t * mem, bool enabled)
473 {
474 gs_memory_gc_status_t stat;
475 gs_ref_memory_t * stable = (gs_ref_memory_t *)mem->stable_memory;
476
477 gs_memory_gc_status(mem, &stat);
478 stat.enabled = enabled;
479 gs_memory_set_gc_status(mem, &stat);
480 gs_memory_gc_status(stable, &stat);
481 stat.enabled = enabled;
482 gs_memory_set_gc_status(stable, &stat);
483 }
484
485 /* ================ Objects ================ */
486
487 /* Allocate a small object quickly if possible. */
488 /* The size must be substantially less than max_uint. */
489 /* ptr must be declared as obj_header_t *. */
490 /* pfl must be declared as obj_header_t **. */
/* Fast path 1: pop an object of exactly the right (rounded) size from
 * the per-size freelist.  On success, ptr points to the contents and
 * the header's size/type have been rewritten. */
#define IF_FREELIST_ALLOC(ptr, imem, size, pstype, pfl)\
        if ( size <= max_freelist_size &&\
             *(pfl = &imem->freelists[(size + obj_align_mask) >> log2_obj_align_mod]) != 0\
           )\
        {       ptr = *pfl;\
                *pfl = *(obj_header_t **)ptr;\
                ptr[-1].o_size = size;\
                ptr[-1].o_type = pstype;\
                /* If debugging, clear the block in an attempt to */\
                /* track down uninitialized data errors. */\
                gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
/* Fast path 2: carve the object out of the large-object freelist. */
#define ELSEIF_BIG_FREELIST_ALLOC(ptr, imem, size, pstype)\
        }\
        else if (size > max_freelist_size &&\
                 (ptr = large_freelist_alloc(imem, size)) != 0)\
        {       ptr[-1].o_type = pstype;\
                /* If debugging, clear the block in an attempt to */\
                /* track down uninitialized data errors. */\
                gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
/* Fast path 3: LIFO-allocate at cbot of the open chunk, leaving room
 * for alignment plus two object headers. */
#define ELSEIF_LIFO_ALLOC(ptr, imem, size, pstype)\
        }\
        else if ( (imem->cc.ctop - (byte *)(ptr = (obj_header_t *)imem->cc.cbot))\
                >= size + (obj_align_mod + sizeof(obj_header_t) * 2) &&\
             size < imem->large_size\
           )\
        {       imem->cc.cbot = (byte *)ptr + obj_size_round(size);\
                ptr->o_alone = 0;\
                ptr->o_size = size;\
                ptr->o_type = pstype;\
                ptr++;\
                /* If debugging, clear the block in an attempt to */\
                /* track down uninitialized data errors. */\
                gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
/* Slow path: fall through to the general allocator. */
#define ELSE_ALLOC\
        }\
        else
527
/* Allocate a movable block of bytes, trying the fast paths in order:
 * exact-size freelist, large freelist, open chunk's LIFO region, and
 * finally the general allocator (alloc_obj). */
static byte *
i_alloc_bytes(gs_memory_t * mem, uint size, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj;
    obj_header_t **pfl;

    IF_FREELIST_ALLOC(obj, imem, size, &st_bytes, pfl)
        alloc_trace(":+bf", imem, cname, NULL, size, obj);
    ELSEIF_BIG_FREELIST_ALLOC(obj, imem, size, &st_bytes)
        alloc_trace(":+bF", imem, cname, NULL, size, obj);
    ELSEIF_LIFO_ALLOC(obj, imem, size, &st_bytes)
        alloc_trace(":+b ", imem, cname, NULL, size, obj);
    ELSE_ALLOC
    {
        obj = alloc_obj(imem, size, &st_bytes, 0, cname);
        if (obj == 0)
            return 0;
        alloc_trace(":+b.", imem, cname, NULL, size, obj);
    }
#if IGC_PTR_STABILITY_CHECK
    obj[-1].d.o.space_id = imem->space_id;
#endif
    return (byte *) obj;
}
553 static byte *
i_alloc_bytes_immovable(gs_memory_t * mem,uint size,client_name_t cname)554 i_alloc_bytes_immovable(gs_memory_t * mem, uint size, client_name_t cname)
555 {
556 gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
557 obj_header_t *obj = alloc_obj(imem, size, &st_bytes,
558 ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);
559
560 if (obj == 0)
561 return 0;
562 alloc_trace("|+b.", imem, cname, NULL, size, obj);
563 return (byte *) obj;
564 }
/* Allocate a movable structure, using the same fast paths as
 * i_alloc_bytes.  In debug builds ALLOC_CHECK_SIZE returns 0 from this
 * function if the type descriptor's size is implausible. */
static void *
i_alloc_struct(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
               client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    uint size = pstype->ssize;
    obj_header_t *obj;
    obj_header_t **pfl;

    ALLOC_CHECK_SIZE(pstype);
    IF_FREELIST_ALLOC(obj, imem, size, pstype, pfl)
        alloc_trace(":+<f", imem, cname, pstype, size, obj);
    ELSEIF_BIG_FREELIST_ALLOC(obj, imem, size, pstype)
        alloc_trace(":+<F", imem, cname, pstype, size, obj);
    ELSEIF_LIFO_ALLOC(obj, imem, size, pstype)
        alloc_trace(":+< ", imem, cname, pstype, size, obj);
    ELSE_ALLOC
    {
        obj = alloc_obj(imem, size, pstype, 0, cname);
        if (obj == 0)
            return 0;
        alloc_trace(":+<.", imem, cname, pstype, size, obj);
    }
#if IGC_PTR_STABILITY_CHECK
    obj[-1].d.o.space_id = imem->space_id;
#endif
    return obj;
}
593 static void *
i_alloc_struct_immovable(gs_memory_t * mem,gs_memory_type_ptr_t pstype,client_name_t cname)594 i_alloc_struct_immovable(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
595 client_name_t cname)
596 {
597 gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
598 uint size = pstype->ssize;
599 obj_header_t *obj;
600
601 ALLOC_CHECK_SIZE(pstype);
602 obj = alloc_obj(imem, size, pstype, ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);
603 alloc_trace("|+<.", imem, cname, pstype, size, obj);
604 return obj;
605 }
606 static byte *
i_alloc_byte_array(gs_memory_t * mem,uint num_elements,uint elt_size,client_name_t cname)607 i_alloc_byte_array(gs_memory_t * mem, uint num_elements, uint elt_size,
608 client_name_t cname)
609 {
610 gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
611 obj_header_t *obj = alloc_obj(imem, (ulong) num_elements * elt_size,
612 &st_bytes, ALLOC_DIRECT, cname);
613
614 if_debug6('A', "[a%d:+b.]%s -bytes-*(%lu=%u*%u) = 0x%lx\n",
615 alloc_trace_space(imem), client_name_string(cname),
616 (ulong) num_elements * elt_size,
617 num_elements, elt_size, (ulong) obj);
618 return (byte *) obj;
619 }
620 static byte *
i_alloc_byte_array_immovable(gs_memory_t * mem,uint num_elements,uint elt_size,client_name_t cname)621 i_alloc_byte_array_immovable(gs_memory_t * mem, uint num_elements,
622 uint elt_size, client_name_t cname)
623 {
624 gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
625 obj_header_t *obj = alloc_obj(imem, (ulong) num_elements * elt_size,
626 &st_bytes, ALLOC_IMMOVABLE | ALLOC_DIRECT,
627 cname);
628
629 if_debug6('A', "[a%d|+b.]%s -bytes-*(%lu=%u*%u) = 0x%lx\n",
630 alloc_trace_space(imem), client_name_string(cname),
631 (ulong) num_elements * elt_size,
632 num_elements, elt_size, (ulong) obj);
633 return (byte *) obj;
634 }
/* Allocate a movable array of structures as a single object.
 * NOTE(review): the num_elements * ssize product is widened to ulong
 * but not otherwise overflow-checked here. */
static void *
i_alloc_struct_array(gs_memory_t * mem, uint num_elements,
                     gs_memory_type_ptr_t pstype, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj;

    ALLOC_CHECK_SIZE(pstype);
#ifdef DEBUG
    /* Reject a type descriptor that uses the generic basic_enum_ptrs:
     * arrays must be described by an element type. */
    if (pstype->enum_ptrs == basic_enum_ptrs) {
        dprintf2("  i_alloc_struct_array: called with incorrect structure type (not element), struct='%s', client='%s'\n",
                 pstype->sname, cname);
        return NULL;            /* fail */
    }
#endif
    obj = alloc_obj(imem,
                    (ulong) num_elements * pstype->ssize,
                    pstype, ALLOC_DIRECT, cname);
    if_debug7('A', "[a%d:+<.]%s %s*(%lu=%u*%u) = 0x%lx\n",
              alloc_trace_space(imem), client_name_string(cname),
              struct_type_name_string(pstype),
              (ulong) num_elements * pstype->ssize,
              num_elements, pstype->ssize, (ulong) obj);
    return (char *)obj;
}
660 static void *
i_alloc_struct_array_immovable(gs_memory_t * mem,uint num_elements,gs_memory_type_ptr_t pstype,client_name_t cname)661 i_alloc_struct_array_immovable(gs_memory_t * mem, uint num_elements,
662 gs_memory_type_ptr_t pstype, client_name_t cname)
663 {
664 gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
665 obj_header_t *obj;
666
667 ALLOC_CHECK_SIZE(pstype);
668 obj = alloc_obj(imem,
669 (ulong) num_elements * pstype->ssize,
670 pstype, ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);
671 if_debug7('A', "[a%d|+<.]%s %s*(%lu=%u*%u) = 0x%lx\n",
672 alloc_trace_space(imem), client_name_string(cname),
673 struct_type_name_string(pstype),
674 (ulong) num_elements * pstype->ssize,
675 num_elements, pstype->ssize, (ulong) obj);
676 return (char *)obj;
677 }
/*
 * Resize an object, in place when possible:
 *   1. same rounded size -- just rewrite the header size;
 *   2. object is the most recent LIFO allocation in the open chunk and
 *      there is room -- grow/shrink by moving cbot;
 *   3. shrinking by at least one header's worth -- trim in place.
 * Otherwise allocate a new object, copy the common prefix, and free
 * the old object.
 */
static void *
i_resize_object(gs_memory_t * mem, void *obj, uint new_num_elements,
                client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *pp = (obj_header_t *) obj - 1;
    gs_memory_type_ptr_t pstype = pp->o_type;
    ulong old_size = pre_obj_contents_size(pp);
    ulong new_size = (ulong) pstype->ssize * new_num_elements;
    ulong old_size_rounded = obj_align_round(old_size);
    ulong new_size_rounded = obj_align_round(new_size);
    void *new_obj = NULL;

    if (old_size_rounded == new_size_rounded) {
        pp->o_size = new_size;
        new_obj = obj;
    } else
        if ((byte *)obj + old_size_rounded == imem->cc.cbot &&
            imem->cc.ctop - (byte *)obj >= new_size_rounded ) {
            imem->cc.cbot = (byte *)obj + new_size_rounded;
            pp->o_size = new_size;
            new_obj = obj;
        } else /* try and trim the object -- but only if room for a dummy header */
            if (new_size_rounded + sizeof(obj_header_t) <= old_size_rounded) {
                trim_obj(imem, obj, new_size, (chunk_t *)0);
                new_obj = obj;
            }
    if (new_obj) {
        if_debug8('A', "[a%d:%c%c ]%s %s(%lu=>%lu) 0x%lx\n",
                  alloc_trace_space(imem),
                  (new_size > old_size ? '>' : '<'),
                  (pstype == &st_bytes ? 'b' : '<'),
                  client_name_string(cname),
                  struct_type_name_string(pstype),
                  old_size, new_size, (ulong) obj);
        return new_obj;
    }
    /* Punt: allocate fresh, copy min(old,new) bytes, free the old. */
    new_obj = gs_alloc_struct_array(mem, new_num_elements, void,
                                    pstype, cname);
    if (new_obj == 0)
        return 0;
    memcpy(new_obj, obj, min(old_size, new_size));
    gs_free_object(mem, obj, cname);
    return new_obj;
}
/*
 * Free an object.  After running the type's finalize procedure:
 *   - if the object is the most recent LIFO allocation in the open
 *     chunk, give the space back to the chunk (consolidating if it
 *     now touches previously freed space);
 *   - if the object has its own chunk (o_alone), free the whole chunk;
 *   - otherwise put it on a freelist;
 * except that objects belonging to an older save level are left in
 * place, and objects too small to hold a freelist link are merely
 * marked free; both of the latter are accounted in lost.objects.
 */
static void
i_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *pp;
    gs_memory_type_ptr_t pstype;

    struct_proc_finalize((*finalize));
    uint size, rounded_size;

    if (ptr == 0)
        return;
    pp = (obj_header_t *) ptr - 1;
    pstype = pp->o_type;
#ifdef DEBUG
    if (gs_debug_c('?')) {
        chunk_locator_t cld;

        /* Detect a double free. */
        if (pstype == &st_free) {
            lprintf2("%s: object 0x%lx already free!\n",
                     client_name_string(cname), (ulong) ptr);
            return;             /*gs_abort(); */
        }
        /* Check that this allocator owns the object being freed. */
        cld.memory = imem;
        while ((cld.cp = cld.memory->clast),
               !chunk_locate_ptr(ptr, &cld)
            ) {
            if (!cld.memory->saved) {
                lprintf3("%s: freeing 0x%lx, not owned by memory 0x%lx!\n",
                         client_name_string(cname), (ulong) ptr,
                         (ulong) mem);
                return;         /*gs_abort(); */
            }
            /****** HACK: we know the saved state is the first ******
             ****** member of an alloc_save_t. ******/
            cld.memory = (gs_ref_memory_t *) cld.memory->saved;
        }
        /* Check that the object is in the allocated region. */
        if (cld.memory == imem && cld.cp == imem->pcc)
            cld.cp = &imem->cc;
        if (!(PTR_BETWEEN((const byte *)pp, cld.cp->cbase,
                          cld.cp->cbot))
            ) {
            lprintf5("%s: freeing 0x%lx,\n\toutside chunk 0x%lx cbase=0x%lx, cbot=0x%lx!\n",
                     client_name_string(cname), (ulong) ptr,
                     (ulong) cld.cp, (ulong) cld.cp->cbase,
                     (ulong) cld.cp->cbot);
            return;             /*gs_abort(); */
        }
    }
#endif
    size = pre_obj_contents_size(pp);
    rounded_size = obj_align_round(size);
    /* Run the type's finalizer (if any) before releasing the space. */
    finalize = pstype->finalize;
    if (finalize != 0) {
        if_debug3('u', "[u]finalizing %s 0x%lx (%s)\n",
                  struct_type_name_string(pstype),
                  (ulong) ptr, client_name_string(cname));
        (*finalize) (ptr);
    }
    /* LIFO fast path: the object is at the top of the open chunk. */
    if ((byte *) ptr + rounded_size == imem->cc.cbot) {
        alloc_trace(":-o ", imem, cname, pstype, size, ptr);
        gs_alloc_fill(ptr, gs_alloc_fill_free, size);
        imem->cc.cbot = (byte *) pp;
        /* IFF this object is adjacent to (or below) the byte after the
         * highest free object, do the consolidation within this chunk. */
        if ((byte *)pp <= imem->cc.int_freed_top) {
            consolidate_chunk_free(&(imem->cc), imem);
        }
        return;
    }
    if (pp->o_alone) {
        /*
         * We gave this object its own chunk.  Free the entire chunk,
         * unless it belongs to an older save level, in which case
         * we mustn't overwrite it.
         */
        chunk_locator_t cl;

#ifdef DEBUG
        {
            chunk_locator_t cld;

            cld.memory = imem;
            cld.cp = 0;
            if (gs_debug_c('a'))
                alloc_trace(
                            (chunk_locate_ptr(ptr, &cld) ? ":-oL" : ":-o~"),
                            imem, cname, pstype, size, ptr);
        }
#endif
        cl.memory = imem;
        cl.cp = 0;
        if (chunk_locate_ptr(ptr, &cl)) {
            if (!imem->is_controlled)
                alloc_free_chunk(cl.cp, imem);
            return;
        }
        /* Don't overwrite even if gs_alloc_debug is set. */
    }
    if (rounded_size >= sizeof(obj_header_t *)) {
        /*
         * Put the object on a freelist, unless it belongs to
         * an older save level, in which case we mustn't
         * overwrite it.
         */
        imem->cfreed.memory = imem;
        if (chunk_locate(ptr, &imem->cfreed)) {
            obj_header_t **pfl;

            /* Choose the large-object list or the per-size list. */
            if (size > max_freelist_size) {
                pfl = &imem->freelists[LARGE_FREELIST_INDEX];
                if (rounded_size > imem->largest_free_size)
                    imem->largest_free_size = rounded_size;
            } else {
                pfl = &imem->freelists[(size + obj_align_mask) >>
                                       log2_obj_align_mod];
            }
            /* keep track of highest object on a freelist */
            if ((byte *)pp >= imem->cc.int_freed_top)
                imem->cc.int_freed_top = (byte *)ptr + rounded_size;
            pp->o_type = &st_free;      /* don't confuse GC */
            o_set_unmarked(pp);
            gs_alloc_fill(ptr, gs_alloc_fill_free, size);
            /* Link the object at the head of the freelist. */
            *(obj_header_t **) ptr = *pfl;
            *pfl = (obj_header_t *) ptr;
            alloc_trace((size > max_freelist_size ? ":-oF" : ":-of"),
                        imem, cname, pstype, size, ptr);
            return;
        }
        /* Don't overwrite even if gs_alloc_debug is set. */
    } else {
        pp->o_type = &st_free;  /* don't confuse GC */
        gs_alloc_fill(ptr, gs_alloc_fill_free, size);
    }
    /* The space could not be recovered: account it as lost. */
    alloc_trace(":-o#", imem, cname, pstype, size, ptr);
    imem->lost.objects += obj_size_round(size);
}
/*
 * Allocate a movable string.  Strings are carved downward from ctop of
 * a chunk.  We cycle through the chunks at the current save level
 * looking for room; if none has space, a large request is given its
 * own chunk, otherwise a fresh chunk is acquired and we retry.
 */
static byte *
i_alloc_string(gs_memory_t * mem, uint nbytes, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    byte *str;
    /*
     * Cycle through the chunks at the current save level, starting
     * with the currently open one.
     */
    chunk_t *cp_orig = imem->pcc;

    if (cp_orig == 0) {
        /* Open an arbitrary chunk. */
        cp_orig = imem->pcc = imem->cfirst;
        alloc_open_chunk(imem);
    }
top:
    if (imem->cc.ctop - imem->cc.cbot > nbytes) {
        if_debug4('A', "[a%d:+> ]%s(%u) = 0x%lx\n",
                  alloc_trace_space(imem), client_name_string(cname), nbytes,
                  (ulong) (imem->cc.ctop - nbytes));
        /* Carve the string off the top (high end) of the chunk. */
        str = imem->cc.ctop -= nbytes;
        gs_alloc_fill(str, gs_alloc_fill_alloc, nbytes);
        return str;
    }
    /* Try the next chunk. */
    {
        chunk_t *cp = imem->cc.cnext;

        alloc_close_chunk(imem);
        if (cp == 0)
            cp = imem->cfirst;
        imem->pcc = cp;
        alloc_open_chunk(imem);
        if (cp != cp_orig)
            goto top;
    }
    if (nbytes > string_space_quanta(max_uint - sizeof(chunk_head_t)) *
        string_data_quantum
        ) {                     /* Can't represent the size in a uint! */
        return 0;
    }
    if (nbytes >= imem->large_size) {   /* Give it a chunk all its own. */
        return i_alloc_string_immovable(mem, nbytes, cname);
    } else {                    /* Add another chunk. */
        chunk_t *cp =
            alloc_acquire_chunk(imem, (ulong) imem->chunk_size, true, "chunk");

        if (cp == 0)
            return 0;
        alloc_close_chunk(imem);
        imem->pcc = cp;
        imem->cc = *imem->pcc;
        gs_alloc_fill(imem->cc.cbase, gs_alloc_fill_free,
                      imem->cc.climit - imem->cc.cbase);
        goto top;
    }
}
921 static byte *
i_alloc_string_immovable(gs_memory_t * mem,uint nbytes,client_name_t cname)922 i_alloc_string_immovable(gs_memory_t * mem, uint nbytes, client_name_t cname)
923 {
924 gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
925 byte *str;
926 /* Give it a chunk all its own. */
927 uint asize = string_chunk_space(nbytes) + sizeof(chunk_head_t);
928 chunk_t *cp = alloc_acquire_chunk(imem, (ulong) asize, true,
929 "large string chunk");
930
931 if (cp == 0)
932 return 0;
933 str = cp->ctop = cp->climit - nbytes;
934 if_debug4('a', "[a%d|+>L]%s(%u) = 0x%lx\n",
935 alloc_trace_space(imem), client_name_string(cname), nbytes,
936 (ulong) str);
937 gs_alloc_fill(str, gs_alloc_fill_alloc, nbytes);
938 return str;
939 }
/*
 * Resize a string in place when possible; otherwise allocate a new one
 * and copy.  Returns the (possibly moved) string, or 0 on failure.
 */
static byte *
i_resize_string(gs_memory_t * mem, byte * data, uint old_num, uint new_num,
                client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    byte *ptr;

    if (old_num == new_num)	/* same size returns the same string */
        return data;
    if (data == imem->cc.ctop &&	/* bottom-most string */
        (new_num < old_num ||
         imem->cc.ctop - imem->cc.cbot > new_num - old_num)
        ) {			/* Resize in place. */
        /* Strings grow downward, so the new start is above (shrink)
           or below (grow) the old start. */
        ptr = data + old_num - new_num;
        if_debug6('A', "[a%d:%c> ]%s(%u->%u) 0x%lx\n",
                  alloc_trace_space(imem),
                  (new_num > old_num ? '>' : '<'),
                  client_name_string(cname), old_num, new_num,
                  (ulong) ptr);
        imem->cc.ctop = ptr;
        /* Regions may overlap, hence memmove rather than memcpy. */
        memmove(ptr, data, min(old_num, new_num));
#ifdef DEBUG
        if (new_num > old_num)
            gs_alloc_fill(ptr + old_num, gs_alloc_fill_alloc,
                          new_num - old_num);
        else
            gs_alloc_fill(data, gs_alloc_fill_free, old_num - new_num);
#endif
    } else
        if (new_num < old_num) {
            /* trim the string and create a free space hole */
            ptr = data;
            imem->lost.strings += old_num - new_num;
            gs_alloc_fill(data + new_num, gs_alloc_fill_free,
                          old_num - new_num);
            if_debug5('A', "[a%d:<> ]%s(%u->%u) 0x%lx\n",
                      alloc_trace_space(imem), client_name_string(cname),
                      old_num, new_num, (ulong)ptr);
        } else {			/* Punt. */
            /* Can't grow in place: allocate fresh, copy, free old. */
            ptr = gs_alloc_string(mem, new_num, cname);
            if (ptr == 0)
                return 0;
            memcpy(ptr, data, min(old_num, new_num));
            gs_free_string(mem, data, old_num, cname);
        }
    return ptr;
}
987
988 static void
i_free_string(gs_memory_t * mem,byte * data,uint nbytes,client_name_t cname)989 i_free_string(gs_memory_t * mem, byte * data, uint nbytes,
990 client_name_t cname)
991 {
992 gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
993 if (data == imem->cc.ctop) {
994 if_debug4('A', "[a%d:-> ]%s(%u) 0x%lx\n",
995 alloc_trace_space(imem), client_name_string(cname), nbytes,
996 (ulong) data);
997 imem->cc.ctop += nbytes;
998 } else {
999 if_debug4('A', "[a%d:->#]%s(%u) 0x%lx\n",
1000 alloc_trace_space(imem), client_name_string(cname), nbytes,
1001 (ulong) data);
1002 imem->lost.strings += nbytes;
1003 }
1004 gs_alloc_fill(data, gs_alloc_fill_free, nbytes);
1005 }
1006
1007 static gs_memory_t *
i_stable(gs_memory_t * mem)1008 i_stable(gs_memory_t *mem)
1009 {
1010 return mem->stable_memory;
1011 }
1012
1013 static void
i_status(gs_memory_t * mem,gs_memory_status_t * pstat)1014 i_status(gs_memory_t * mem, gs_memory_status_t * pstat)
1015 {
1016 gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
1017 ulong unused = imem->lost.refs + imem->lost.strings;
1018 ulong inner = 0;
1019
1020 alloc_close_chunk(imem);
1021 /* Add up unallocated space within each chunk. */
1022 /* Also keep track of space allocated to inner chunks, */
1023 /* which are included in previous_status.allocated. */
1024 {
1025 const chunk_t *cp = imem->cfirst;
1026
1027 while (cp != 0) {
1028 unused += cp->ctop - cp->cbot;
1029 if (cp->outer)
1030 inner += cp->cend - (byte *) cp->chead;
1031 cp = cp->cnext;
1032 }
1033 }
1034 unused += compute_free_objects(imem);
1035 pstat->used = imem->allocated + inner - unused +
1036 imem->previous_status.used;
1037 pstat->allocated = imem->allocated +
1038 imem->previous_status.allocated;
1039 }
1040
1041 static void
i_enable_free(gs_memory_t * mem,bool enable)1042 i_enable_free(gs_memory_t * mem, bool enable)
1043 {
1044 if (enable)
1045 mem->procs.free_object = i_free_object,
1046 mem->procs.free_string = i_free_string;
1047 else
1048 mem->procs.free_object = gs_ignore_free_object,
1049 mem->procs.free_string = gs_ignore_free_string;
1050 }
1051
1052 /* ------ Internal procedures ------ */
1053
1054 /* Compute the amount of free object space by scanning free lists. */
1055 static ulong
compute_free_objects(gs_ref_memory_t * mem)1056 compute_free_objects(gs_ref_memory_t * mem)
1057 {
1058 ulong unused = mem->lost.objects;
1059 int i;
1060
1061 /* Add up space on free lists. */
1062 for (i = 0; i < num_freelists; i++) {
1063 const obj_header_t *pfree;
1064
1065 for (pfree = mem->freelists[i]; pfree != 0;
1066 pfree = *(const obj_header_t * const *)pfree
1067 )
1068 unused += obj_align_round(pfree[-1].o_size);
1069 }
1070 return unused;
1071 }
1072
1073 /* Allocate an object from the large-block freelist. */
static obj_header_t *	/* rets obj if allocated, else 0 */
large_freelist_alloc(gs_ref_memory_t *mem, uint size)
{
    /* Scan large object freelist. We'll grab an object up to 1/8 bigger */
    /* right away, else use best fit of entire scan. */
    uint aligned_size = obj_align_round(size);
    /* Minimum size that still leaves room for a trailing free header
       when the block is split by trim_obj. */
    uint aligned_min_size = aligned_size + sizeof(obj_header_t);
    uint aligned_max_size =
        aligned_min_size + obj_align_round(aligned_min_size / 8);
    obj_header_t *best_fit = 0;
    obj_header_t **best_fit_prev = NULL; /* Initialize against indeteminizm. */
    uint best_fit_size = max_uint;
    obj_header_t *pfree;
    obj_header_t **ppfprev = &mem->freelists[LARGE_FREELIST_INDEX];
    uint largest_size = 0;

    if (aligned_size > mem->largest_free_size)
        return 0;		/* definitely no block large enough */

    while ((pfree = *ppfprev) != 0) {
        uint free_size = obj_align_round(pfree[-1].o_size);

        /* Accept an exact fit, or a splittable block smaller than the
           best candidate found so far. */
        if (free_size == aligned_size ||
            (free_size >= aligned_min_size && free_size < best_fit_size)
            ) {
            best_fit = pfree;
            best_fit_prev = ppfprev;
            best_fit_size = pfree[-1].o_size;
            if (best_fit_size <= aligned_max_size)
                break;	/* good enough fit to spare scan of entire list */
        }
        ppfprev = (obj_header_t **) pfree;
        if (free_size > largest_size)
            largest_size = free_size;
    }
    if (best_fit == 0) {
        /*
         * No single free chunk is large enough, but since we scanned the
         * entire list, we now have an accurate updated value for
         * largest_free_size.
         */
        mem->largest_free_size = largest_size;
        return 0;
    }

    /* Remove from freelist & return excess memory to free */
    *best_fit_prev = *(obj_header_t **)best_fit;
    trim_obj(mem, best_fit, aligned_size, (chunk_t *)0);

    /* Pre-init block header; o_alone & o_type are already init'd */
    best_fit[-1].o_size = size;

    return best_fit;
}
1128
/* Allocate an object.  This handles all but the fastest, simplest case. */
/*
 * Strategy: immovable or oversized objects get a chunk of their own;
 * otherwise try the large-block freelist (when applicable), then the
 * end of each chunk at the current save level, then a brand-new chunk,
 * and finally (controlled memory only) scavenging of low free memory.
 * Returns a pointer just past the object header, or 0 on failure.
 */
static obj_header_t *
alloc_obj(gs_ref_memory_t *mem, ulong lsize, gs_memory_type_ptr_t pstype,
          alloc_flags_t flags, client_name_t cname)
{
    obj_header_t *ptr;

    if (lsize >= mem->large_size || (flags & ALLOC_IMMOVABLE)) {
        /*
         * Give the object a chunk all its own.  Note that this case does
         * not occur if is_controlled is true.
         */
        ulong asize =
            ((lsize + obj_align_mask) & -obj_align_mod) +
            sizeof(obj_header_t);
        chunk_t *cp =
            alloc_acquire_chunk(mem, asize + sizeof(chunk_head_t), false,
                                "large object chunk");

        /* Reject sizes whose rounded form cannot be represented. */
        if (
#if arch_sizeof_long > arch_sizeof_int
            asize > max_uint
#else
            asize < lsize
#endif
            )
            return 0;
        if (cp == 0)
            return 0;
        ptr = (obj_header_t *) cp->cbot;
        cp->cbot += asize;
        ptr->o_alone = 1;
        ptr->o_size = lsize;
    } else {
        /*
         * Cycle through the chunks at the current save level, starting
         * with the currently open one.
         */
        chunk_t *cp_orig = mem->pcc;
        uint asize = obj_size_round((uint) lsize);
        bool allocate_success = false;

        if (lsize > max_freelist_size && (flags & ALLOC_DIRECT)) {
            /* We haven't checked the large block freelist yet. */
            if ((ptr = large_freelist_alloc(mem, lsize)) != 0) {
                --ptr;			/* must point to header */
                goto done;
            }
        }

        if (cp_orig == 0) {
            /* Open an arbitrary chunk. */
            cp_orig = mem->pcc = mem->cfirst;
            alloc_open_chunk(mem);
        }

        /* Note: also sets ptr to the chunk's cbot as a side effect. */
#define CAN_ALLOC_AT_END(cp)\
  ((cp)->ctop - (byte *) (ptr = (obj_header_t *) (cp)->cbot)\
   > asize + sizeof(obj_header_t))

        do {
            if (CAN_ALLOC_AT_END(&mem->cc)) {
                allocate_success = true;
                break;
            } else if (mem->is_controlled) {
                /* Try consolidating free space. */
                gs_consolidate_free((gs_memory_t *)mem);
                if (CAN_ALLOC_AT_END(&mem->cc)) {
                    allocate_success = true;
                    break;
                }
            }
            /* No luck, go on to the next chunk. */
            {
                chunk_t *cp = mem->cc.cnext;

                alloc_close_chunk(mem);
                if (cp == 0)
                    cp = mem->cfirst;
                mem->pcc = cp;
                alloc_open_chunk(mem);
            }
        } while (mem->pcc != cp_orig);

#ifdef CONSOLIDATE_BEFORE_ADDING_CHUNK
        if (!allocate_success) {
            /*
             * Try consolidating free space before giving up.
             * It's not clear this is a good idea, since it requires quite
             * a lot of computation and doesn't seem to improve things much.
             */
            if (!mem->is_controlled) { /* already did this if controlled */
                chunk_t *cp = cp_orig;

                alloc_close_chunk(mem);
                do {
                    consolidate_chunk_free(cp, mem);
                    if (CAN_ALLOC_AT_END(cp)) {
                        mem->pcc = cp;
                        alloc_open_chunk(mem);
                        allocate_success = true;
                        break;
                    }
                    if ((cp = cp->cnext) == 0)
                        cp = mem->cfirst;
                } while (cp != cp_orig);
            }
        }
#endif

#undef CAN_ALLOC_AT_END

        if (!allocate_success) {
            /* Add another chunk. */
            chunk_t *cp =
                alloc_add_chunk(mem, (ulong)mem->chunk_size, "chunk");

            if (cp) {
                /* mem->pcc == cp, mem->cc == *mem->pcc. */
                ptr = (obj_header_t *)cp->cbot;
                allocate_success = true;
            }
        }

        /*
         * If no success, try to scavenge from low free memory.  This is
         * only enabled for controlled memory (currently only async
         * renderer) because it's too much work to prevent it from
         * examining outer save levels in the general case.
         */
        if (allocate_success)
            mem->cc.cbot = (byte *) ptr + asize;
        else if (!mem->is_controlled ||
                 (ptr = scavenge_low_free(mem, (uint)lsize)) == 0)
            return 0;	/* allocation failed */
        ptr->o_alone = 0;
        ptr->o_size = (uint) lsize;
    }
done:
    ptr->o_type = pstype;
#   if IGC_PTR_STABILITY_CHECK
        ptr->d.o.space_id = mem->space_id;
#   endif
    /* Return the address just past the header. */
    ptr++;
    gs_alloc_fill(ptr, gs_alloc_fill_alloc, lsize);
    return ptr;
}
1276
1277 /*
1278 * Consolidate free objects contiguous to free space at cbot onto the cbot
1279 * area. Also keep track of end of highest internal free object
1280 * (int_freed_top).
1281 */
static void
consolidate_chunk_free(chunk_t *cp, gs_ref_memory_t *mem)
{
    /* Tracks the start of the current run of contiguous free objects. */
    obj_header_t *begin_free = 0;

    cp->int_freed_top = cp->cbase;	/* below all objects in chunk */
    SCAN_CHUNK_OBJECTS(cp)
        DO_ALL
        if (pre->o_type == &st_free) {
            if (begin_free == 0)
                begin_free = pre;
        } else {
            /* A live object ends any free run below it. */
            if (begin_free)
                cp->int_freed_top = (byte *)pre; /* first byte following internal free */
            begin_free = 0;
        }
    END_OBJECTS_SCAN
    if (begin_free) {
        /* We found free objects at the top of the object area. */
        /* Remove the free objects from the freelists. */
        remove_range_from_freelist(mem, begin_free, cp->cbot);
        if_debug4('a', "[a]resetting chunk 0x%lx cbot from 0x%lx to 0x%lx (%lu free)\n",
                  (ulong) cp, (ulong) cp->cbot, (ulong) begin_free,
                  (ulong) ((byte *) cp->cbot - (byte *) begin_free));
        /* Give the trailing free run back to the chunk's free middle. */
        cp->cbot = (byte *) begin_free;
    }
}
1309
1310 /* Consolidate free objects. */
void
ialloc_consolidate_free(gs_ref_memory_t *mem)
{
    chunk_t *cp;
    chunk_t *cprev;

    alloc_close_chunk(mem);

    /* Visit chunks in reverse order to encourage LIFO behavior. */
    for (cp = mem->clast; cp != 0; cp = cprev) {
        cprev = cp->cprev;
        consolidate_chunk_free(cp, mem);
        if (cp->cbot == cp->cbase && cp->ctop == cp->climit) {
            /* The entire chunk is free. */
            chunk_t *cnext = cp->cnext;

            if (!mem->is_controlled) {
                alloc_free_chunk(cp, mem);
                /* NOTE(review): cp has just been freed; this compares
                   only the stale pointer value, never dereferences it. */
                if (mem->pcc == cp)
                    /* Re-point pcc at whichever neighbor has the most
                       free space between cbot and ctop. */
                    mem->pcc =
                        (cnext == 0 ? cprev : cprev == 0 ? cnext :
                         cprev->cbot - cprev->ctop >
                         cnext->cbot - cnext->ctop ? cprev :
                         cnext);
            }
        }
    }
    alloc_open_chunk(mem);
}
1340 static void
i_consolidate_free(gs_memory_t * mem)1341 i_consolidate_free(gs_memory_t *mem)
1342 {
1343 ialloc_consolidate_free((gs_ref_memory_t *)mem);
1344 }
1345
1346 /* try to free-up given amount of space from freespace below chunk base */
static obj_header_t *	/* returns uninitialized object hdr, NULL if none found */
scavenge_low_free(gs_ref_memory_t *mem, unsigned request_size)
{
    /* find 1st range of memory that can be glued back together to fill request */
    obj_header_t *found_pre = 0;

    /* Visit chunks in forward order */
    obj_header_t *begin_free = 0;
    uint found_free;
    uint request_size_rounded = obj_size_round(request_size);
    uint need_free = request_size_rounded + sizeof(obj_header_t);	/* room for GC's dummy hdr */
    chunk_t *cp;

    for (cp = mem->cfirst; cp != 0; cp = cp->cnext) {
        begin_free = 0;
        found_free = 0;
        SCAN_CHUNK_OBJECTS(cp)
            DO_ALL
            if (pre->o_type == &st_free) {
                /* Accumulate a run of contiguous free objects. */
                if (begin_free == 0) {
                    found_free = 0;
                    begin_free = pre;
                }
                found_free += pre_obj_rounded_size(pre);
                if (begin_free != 0 && found_free >= need_free)
                    break;
            } else
                begin_free = 0;
        END_OBJECTS_SCAN_NO_ABORT

        /* Found sufficient range of empty memory */
        if (begin_free != 0 && found_free >= need_free) {

            /* Fish found pieces out of various freelists */
            remove_range_from_freelist(mem, (char*)begin_free,
                                       (char*)begin_free + found_free);

            /* Prepare found object */
            found_pre = begin_free;
            found_pre->o_type = &st_free;  /* don't confuse GC if gets lost */
            found_pre->o_size = found_free - sizeof(obj_header_t);

            /* Chop off excess tail piece & toss it back into free pool */
            trim_obj(mem, found_pre + 1, request_size, cp);
        }
    }
    return found_pre;
}
1395
1396 /* Remove range of memory from a mem's freelists */
static void
remove_range_from_freelist(gs_ref_memory_t *mem, void* bottom, void* top)
{
    /* Count of free objects per freelist index, valid only for indices
       in [smallest, largest]; lazily zero-extended as the window grows. */
    int num_free[num_freelists];
    int smallest = num_freelists, largest = -1;
    const obj_header_t *cur;
    uint size;
    int i;
    uint removed = 0;

    /*
     * Scan from bottom to top, a range containing only free objects,
     * counting the number of objects of each size.
     */

    for (cur = bottom; cur != top;
         cur = (const obj_header_t *)
             ((const byte *)cur + obj_size_round(size))
        ) {
        size = cur->o_size;
        i = (size > max_freelist_size ? LARGE_FREELIST_INDEX :
             (size + obj_align_mask) >> log2_obj_align_mod);
        if (i < smallest) {
            /*
             * 0-length free blocks aren't kept on any list, because
             * they don't have room for a pointer.
             */
            if (i == 0)
                continue;
            /* Zero the newly exposed slots below the old window. */
            if (smallest < num_freelists)
                memset(&num_free[i], 0, (smallest - i) * sizeof(int));
            else
                num_free[i] = 0;
            smallest = i;
        }
        if (i > largest) {
            /* Zero the newly exposed slots above the old window. */
            if (largest >= 0)
                memset(&num_free[largest + 1], 0, (i - largest) * sizeof(int));
            largest = i;
        }
        num_free[i]++;
    }

    /*
     * Remove free objects from the freelists, adjusting lost.objects by
     * subtracting the size of the region being processed minus the amount
     * of space reclaimed.
     */

    for (i = smallest; i <= largest; i++) {
        int count = num_free[i];
        obj_header_t *pfree;
        obj_header_t **ppfprev;

        if (!count)
            continue;
        ppfprev = &mem->freelists[i];
        /* Unlink exactly 'count' entries that fall inside [bottom, top). */
        for (;;) {
            pfree = *ppfprev;
            if (PTR_GE(pfree, bottom) && PTR_LT(pfree, top)) {
                /* We're removing an object. */
                *ppfprev = *(obj_header_t **) pfree;
                removed += obj_align_round(pfree[-1].o_size);
                if (!--count)
                    break;
            } else
                ppfprev = (obj_header_t **) pfree;
        }
    }
    mem->lost.objects -= (char*)top - (char*)bottom - removed;
}
1468
1469 /* Trim a memory object down to a given size */
static void
trim_obj(gs_ref_memory_t *mem, obj_header_t *obj, uint size, chunk_t *cp)
/* Obj must have rounded size == req'd size, or have enough room for */
/* trailing dummy obj_header */
{
    uint rounded_size = obj_align_round(size);
    obj_header_t *pre_obj = obj - 1;
    /* Header position for the split-off excess piece, if any. */
    obj_header_t *excess_pre = (obj_header_t*)((char*)obj + rounded_size);
    uint old_rounded_size = obj_align_round(pre_obj->o_size);
    uint excess_size = old_rounded_size - rounded_size - sizeof(obj_header_t);

    /* trim object's size to desired */
    pre_obj->o_size = size;
    if (old_rounded_size == rounded_size)
        return;			/* nothing more to do here */
    /*
     * If the object is alone in its chunk, move cbot to point to the end
     * of the object.
     */
    if (pre_obj->o_alone) {
        if (!cp) {
            /* Caller didn't supply the chunk; find it. */
            mem->cfreed.memory = mem;
            if (chunk_locate(obj, &mem->cfreed)) {
                cp = mem->cfreed.cp;
            }
        }
        if (cp) {
#ifdef DEBUG
            if (cp->cbot != (byte *)obj + old_rounded_size) {
                lprintf3("resizing 0x%lx, old size %u, new size %u, cbot wrong!\n",
                         (ulong)obj, old_rounded_size, size);
                /* gs_abort */
            } else
#endif
            {
                cp->cbot = (byte *)excess_pre;
                return;
            }
        }
        /*
         * Something very weird is going on.  This probably shouldn't
         * ever happen, but if it does....
         */
        pre_obj->o_alone = 0;
    }
    /* make excess into free obj */
    excess_pre->o_type = &st_free;  /* don't confuse GC */
    excess_pre->o_size = excess_size;
    excess_pre->o_alone = 0;
    if (excess_size >= obj_align_mod) {
        /* Put excess object on a freelist */
        obj_header_t **pfl;

        /* Track the highest byte past any internal free object. */
        if ((byte *)excess_pre >= mem->cc.int_freed_top)
            mem->cc.int_freed_top = (byte *)excess_pre + excess_size;
        if (excess_size <= max_freelist_size)
            pfl = &mem->freelists[(excess_size + obj_align_mask) >>
                                 log2_obj_align_mod];
        else {
            uint rounded_size = obj_align_round(excess_size);

            pfl = &mem->freelists[LARGE_FREELIST_INDEX];
            if (rounded_size > mem->largest_free_size)
                mem->largest_free_size = rounded_size;
        }
        /* Link the piece (body follows its header) onto the freelist. */
        *(obj_header_t **) (excess_pre + 1) = *pfl;
        *pfl = excess_pre + 1;
        mem->cfreed.memory = mem;
    } else {
        /* excess piece will be "lost" memory */
        mem->lost.objects += excess_size + sizeof(obj_header_t);
    }
}
1543
1544 /* ================ Roots ================ */
1545
1546 /* Register a root. */
1547 static int
i_register_root(gs_memory_t * mem,gs_gc_root_t * rp,gs_ptr_type_t ptype,void ** up,client_name_t cname)1548 i_register_root(gs_memory_t * mem, gs_gc_root_t * rp, gs_ptr_type_t ptype,
1549 void **up, client_name_t cname)
1550 {
1551 gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
1552
1553 if (rp == NULL) {
1554 rp = gs_raw_alloc_struct_immovable(imem->non_gc_memory, &st_gc_root_t,
1555 "i_register_root");
1556 if (rp == 0)
1557 return_error(gs_error_VMerror);
1558 rp->free_on_unregister = true;
1559 } else
1560 rp->free_on_unregister = false;
1561 if_debug3('8', "[8]register root(%s) 0x%lx -> 0x%lx\n",
1562 client_name_string(cname), (ulong)rp, (ulong)up);
1563 rp->ptype = ptype;
1564 rp->p = up;
1565 rp->next = imem->roots;
1566 imem->roots = rp;
1567 return 0;
1568 }
1569
1570 /* Unregister a root. */
1571 static void
i_unregister_root(gs_memory_t * mem,gs_gc_root_t * rp,client_name_t cname)1572 i_unregister_root(gs_memory_t * mem, gs_gc_root_t * rp, client_name_t cname)
1573 {
1574 gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
1575 gs_gc_root_t **rpp = &imem->roots;
1576
1577 if_debug2('8', "[8]unregister root(%s) 0x%lx\n",
1578 client_name_string(cname), (ulong) rp);
1579 while (*rpp != rp)
1580 rpp = &(*rpp)->next;
1581 *rpp = (*rpp)->next;
1582 if (rp->free_on_unregister)
1583 gs_free_object(imem->non_gc_memory, rp, "i_unregister_root");
1584 }
1585
1586 /* ================ Chunks ================ */
1587
1588 public_st_chunk();
1589
1590 /* Insert a chunk in the chain. This is exported for the GC and for */
1591 /* the forget_save operation. */
void
alloc_link_chunk(chunk_t * cp, gs_ref_memory_t * imem)
{
    byte *cdata = cp->cbase;
    chunk_t *icp;
    chunk_t *prev;

    /*
     * Allocators tend to allocate in either ascending or descending
     * address order.  The loop will handle the latter well; check for
     * the former first.
     */
    if (imem->clast && PTR_GE(cdata, imem->clast->ctop))
        icp = 0;
    else
        /* Find the first chunk whose ctop lies above cdata. */
        for (icp = imem->cfirst; icp != 0 && PTR_GE(cdata, icp->ctop);
             icp = icp->cnext
            );
    cp->cnext = icp;
    if (icp == 0) {		/* add at end of chain */
        prev = imem->clast;
        imem->clast = cp;
    } else {			/* insert before icp */
        prev = icp->cprev;
        icp->cprev = cp;
    }
    cp->cprev = prev;
    if (prev == 0)
        imem->cfirst = cp;
    else
        prev->cnext = cp;
    /* Keep the cached copy of the open chunk's links up to date. */
    if (imem->pcc != 0) {
        imem->cc.cnext = imem->pcc->cnext;
        imem->cc.cprev = imem->pcc->cprev;
    }
}
1628
1629 /* Add a chunk for ordinary allocation. */
1630 static chunk_t *
alloc_add_chunk(gs_ref_memory_t * mem,ulong csize,client_name_t cname)1631 alloc_add_chunk(gs_ref_memory_t * mem, ulong csize, client_name_t cname)
1632 {
1633 chunk_t *cp = alloc_acquire_chunk(mem, csize, true, cname);
1634
1635 if (cp) {
1636 alloc_close_chunk(mem);
1637 mem->pcc = cp;
1638 mem->cc = *mem->pcc;
1639 gs_alloc_fill(mem->cc.cbase, gs_alloc_fill_free,
1640 mem->cc.climit - mem->cc.cbase);
1641 }
1642 return cp;
1643 }
1644
1645 /* Acquire a chunk. If we would exceed MaxLocalVM (if relevant), */
1646 /* or if we would exceed the VMThreshold and psignal is NULL, */
1647 /* return 0; if we would exceed the VMThreshold but psignal is valid, */
1648 /* just set the signal and return successfully. */
static chunk_t *
alloc_acquire_chunk(gs_ref_memory_t * mem, ulong csize, bool has_strings,
                    client_name_t cname)
{
    gs_memory_t *parent = mem->non_gc_memory;
    chunk_t *cp;
    byte *cdata;

#if arch_sizeof_long > arch_sizeof_int
    /* If csize is larger than max_uint, punt. */
    if (csize != (uint) csize)
        return 0;
#endif
    cp = gs_raw_alloc_struct_immovable(parent, &st_chunk, cname);

    if( mem->gc_status.psignal != 0) {
        /* we have a garbage collector */
        if ((ulong) (mem->allocated) >= mem->limit) {
            /* Over the threshold: record how much was wanted and either
               fail (hard limit reached) or signal the collector. */
            mem->gc_status.requested += csize;
            if (mem->limit >= mem->gc_status.max_vm) {
                gs_free_object(parent, cp, cname);
                return 0;
            }
            if_debug4('0', "[0]signaling space=%d, allocated=%ld, limit=%ld, requested=%ld\n",
                      mem->space, (long)mem->allocated,
                      (long)mem->limit, (long)mem->gc_status.requested);
            *mem->gc_status.psignal = mem->gc_status.signal_value;
        }
    }
    cdata = gs_alloc_bytes_immovable(parent, csize, cname);
    if (cp == 0 || cdata == 0) {
        /* Either allocation failed; free whichever succeeded.
           (gs_free_object accepts 0.) */
        gs_free_object(parent, cdata, cname);
        gs_free_object(parent, cp, cname);
        mem->gc_status.requested = csize;
        return 0;
    }
    alloc_init_chunk(cp, cdata, cdata + csize, has_strings, (chunk_t *) 0);
    alloc_link_chunk(cp, mem);
    mem->allocated += st_chunk.ssize + csize;
    return cp;
}
1690
1691 /* Initialize the pointers in a chunk. This is exported for save/restore. */
1692 /* The bottom pointer must be aligned, but the top pointer need not */
1693 /* be aligned. */
void
alloc_init_chunk(chunk_t * cp, byte * bot, byte * top, bool has_strings,
                 chunk_t * outer)
{
    byte *cdata = bot;

    if (outer != 0)
        outer->inner_count++;
    /* The chunk header occupies the bottom of the data area. */
    cp->chead = (chunk_head_t *) cdata;
    cdata += sizeof(chunk_head_t);
    cp->cbot = cp->cbase = cp->int_freed_top = cdata;
    cp->cend = top;
    cp->rcur = 0;
    cp->rtop = 0;
    cp->outer = outer;
    cp->inner_count = 0;
    cp->has_refs = false;
    cp->sbase = cdata;
    if (has_strings && top - cdata >= string_space_quantum + sizeof(long) - 1) {
        /*
         * We allocate a large enough string marking and reloc table
         * to cover the entire chunk.
         */
        uint nquanta = string_space_quanta(top - cdata);

        cp->climit = cdata + nquanta * string_data_quantum;
        /* Mark and reloc tables live above climit, at the chunk top. */
        cp->smark = cp->climit;
        cp->smark_size = string_quanta_mark_size(nquanta);
        cp->sreloc =
            (string_reloc_offset *) (cp->smark + cp->smark_size);
        cp->sfree1 = (uint *) cp->sreloc;
    } else {
        /* No strings, don't need the string GC tables. */
        cp->climit = cp->cend;
        cp->sfree1 = 0;
        cp->smark = 0;
        cp->smark_size = 0;
        cp->sreloc = 0;
    }
    /* Strings grow downward from climit. */
    cp->ctop = cp->climit;
    alloc_init_free_strings(cp);
}
1736
1737 /* Initialize the string freelists in a chunk. */
1738 void
alloc_init_free_strings(chunk_t * cp)1739 alloc_init_free_strings(chunk_t * cp)
1740 {
1741 if (cp->sfree1)
1742 memset(cp->sfree1, 0, STRING_FREELIST_SPACE(cp));
1743 cp->sfree = 0;
1744 }
1745
1746 /* Close up the current chunk. */
1747 /* This is exported for save/restore and the GC. */
1748 void
alloc_close_chunk(gs_ref_memory_t * mem)1749 alloc_close_chunk(gs_ref_memory_t * mem)
1750 {
1751 if (mem->pcc != 0) {
1752 *mem->pcc = mem->cc;
1753 #ifdef DEBUG
1754 if (gs_debug_c('a')) {
1755 dlprintf1("[a%d]", alloc_trace_space(mem));
1756 dprintf_chunk("closing chunk", mem->pcc);
1757 }
1758 #endif
1759 }
1760 }
1761
1762 /* Reopen the current chunk after a GC or restore. */
1763 void
alloc_open_chunk(gs_ref_memory_t * mem)1764 alloc_open_chunk(gs_ref_memory_t * mem)
1765 {
1766 if (mem->pcc != 0) {
1767 mem->cc = *mem->pcc;
1768 #ifdef DEBUG
1769 if (gs_debug_c('a')) {
1770 dlprintf1("[a%d]", alloc_trace_space(mem));
1771 dprintf_chunk("opening chunk", mem->pcc);
1772 }
1773 #endif
1774 }
1775 }
1776
1777 /* Remove a chunk from the chain. This is exported for the GC. */
void
alloc_unlink_chunk(chunk_t * cp, gs_ref_memory_t * mem)
{
#ifdef DEBUG
    if (gs_alloc_debug) {	/* Check to make sure this chunk belongs to this allocator. */
        const chunk_t *ap = mem->cfirst;

        while (ap != 0 && ap != cp)
            ap = ap->cnext;
        if (ap != cp) {
            lprintf2("unlink_chunk 0x%lx not owned by memory 0x%lx!\n",
                     (ulong) cp, (ulong) mem);
            return;		/*gs_abort(); */
        }
    }
#endif
    /* Standard doubly-linked-list removal, updating cfirst/clast. */
    if (cp->cprev == 0)
        mem->cfirst = cp->cnext;
    else
        cp->cprev->cnext = cp->cnext;
    if (cp->cnext == 0)
        mem->clast = cp->cprev;
    else
        cp->cnext->cprev = cp->cprev;
    if (mem->pcc != 0) {
        /* Refresh the cached open chunk's links. */
        mem->cc.cnext = mem->pcc->cnext;
        mem->cc.cprev = mem->pcc->cprev;
        if (mem->pcc == cp) {
            /* The open chunk itself was unlinked; close it out. */
            mem->pcc = 0;
            mem->cc.cbot = mem->cc.ctop = 0;
        }
    }
}
1811
1812 /*
1813 * Free a chunk. This is exported for the GC. Since we eventually use
1814 * this to free the chunk containing the allocator itself, we must be
1815 * careful not to reference anything in the allocator after freeing the
1816 * chunk data.
1817 */
void
alloc_free_chunk(chunk_t * cp, gs_ref_memory_t * mem)
{
    gs_memory_t *parent = mem->non_gc_memory;
    /* Capture data pointer and size before freeing anything. */
    byte *cdata = (byte *)cp->chead;
    ulong csize = (byte *)cp->cend - cdata;

    alloc_unlink_chunk(cp, mem);
    mem->allocated -= st_chunk.ssize;
    if (mem->cfreed.cp == cp)
        mem->cfreed.cp = 0;	/* invalidate the cached chunk locator */
    if (cp->outer == 0) {
        /* Top-level chunk: release its data to the parent allocator. */
        mem->allocated -= csize;
        gs_free_object(parent, cdata, "alloc_free_chunk(data)");
    } else {
        /* Inner chunk: data belongs to the outer chunk; just scrub it. */
        cp->outer->inner_count--;
        gs_alloc_fill(cdata, gs_alloc_fill_free, csize);
    }
    gs_free_object(parent, cp, "alloc_free_chunk(chunk struct)");
}
1838
1839 /* Find the chunk for a pointer. */
1840 /* Note that this only searches the current save level. */
1841 /* Since a given save level can't contain both a chunk and an inner chunk */
1842 /* of that chunk, we can stop when is_within_chunk succeeds, and just test */
1843 /* is_in_inner_chunk then. */
bool
chunk_locate_ptr(const void *ptr, chunk_locator_t * clp)
{
    register chunk_t *cp = clp->cp;

    if (cp == 0) {
        cp = clp->memory->cfirst;
        if (cp == 0)
            return false;
        /* ptr is in the last chunk often enough to be worth checking for. */
        if (PTR_GE(ptr, clp->memory->clast->cbase))
            cp = clp->memory->clast;
    }
    /* Chunks are kept in address order, so walk toward ptr. */
    if (PTR_LT(ptr, cp->cbase)) {
        /* ptr is below the starting chunk: walk backward. */
        do {
            cp = cp->cprev;
            if (cp == 0)
                return false;
        }
        while (PTR_LT(ptr, cp->cbase));
        if (PTR_GE(ptr, cp->cend))
            return false;	/* ptr lies in a gap between chunks */
    } else {
        /* ptr is at or above the starting chunk: walk forward. */
        while (PTR_GE(ptr, cp->cend)) {
            cp = cp->cnext;
            if (cp == 0)
                return false;
        }
        if (PTR_LT(ptr, cp->cbase))
            return false;	/* ptr lies in a gap between chunks */
    }
    /* Cache the result; an inner chunk at this level doesn't count. */
    clp->cp = cp;
    return !ptr_is_in_inner_chunk(ptr, cp);
}
1878
1879 /* ------ Debugging ------ */
1880
1881 #ifdef DEBUG
1882
1883 #include "string_.h"
1884
1885 static inline bool
obj_in_control_region(const void * obot,const void * otop,const dump_control_t * pdc)1886 obj_in_control_region(const void *obot, const void *otop,
1887 const dump_control_t *pdc)
1888 {
1889 return
1890 ((pdc->bottom == NULL || PTR_GT(otop, pdc->bottom)) &&
1891 (pdc->top == NULL || PTR_LT(obot, pdc->top)));
1892 }
1893
/* Default dump control: dump_do_default options, no address bounds. */
const dump_control_t dump_control_default =
{
    dump_do_default, NULL, NULL
};
/* Dump everything: strings, type addresses, pointers, pointed-to */
/* strings, and raw object contents; no address bounds. */
const dump_control_t dump_control_all =
{
    dump_do_strings | dump_do_type_addresses | dump_do_pointers |
    dump_do_pointed_strings | dump_do_contents, NULL, NULL
};

/* Like dump_control_all, but omits raw object contents. */
const dump_control_t dump_control_no_contents =
{
    dump_do_strings | dump_do_type_addresses | dump_do_pointers |
    dump_do_pointed_strings, NULL, NULL
};
1909
1910 /*
1911 * Internal procedure to dump a block of memory, in hex and optionally
1912 * also as characters.
1913 */
static void
debug_indent(int indent)
{
    int count;

    /* Emit 'indent' spaces (nothing for zero or negative values). */
    for (count = 0; count < indent; ++count)
	dputc(' ');
}
1922 static void
debug_dump_contents(const byte * bot,const byte * top,int indent,bool as_chars)1923 debug_dump_contents(const byte * bot, const byte * top, int indent,
1924 bool as_chars)
1925 {
1926 const byte *block;
1927
1928 #define block_size 16
1929
1930 if (bot >= top)
1931 return;
1932 for (block = bot - ((bot - (byte *) 0) & (block_size - 1));
1933 block < top; block += block_size
1934 ) {
1935 int i;
1936 char label[12];
1937
1938 /* Check for repeated blocks. */
1939 if (block >= bot + block_size &&
1940 block <= top - (block_size * 2) &&
1941 !memcmp(block, block - block_size, block_size) &&
1942 !memcmp(block, block + block_size, block_size)
1943 ) {
1944 if (block < bot + block_size * 2 ||
1945 memcmp(block, block - block_size * 2, block_size)
1946 ) {
1947 debug_indent(indent);
1948 dputs(" ...\n");
1949 }
1950 continue;
1951 }
1952 sprintf(label, "0x%lx:", (ulong) block);
1953 debug_indent(indent);
1954 dputs(label);
1955 for (i = 0; i < block_size; ++i) {
1956 const char *sepr = ((i & 3) == 0 && i != 0 ? " " : " ");
1957
1958 dputs(sepr);
1959 if (block + i >= bot && block + i < top)
1960 dprintf1("%02x", block[i]);
1961 else
1962 dputs(" ");
1963 }
1964 dputc('\n');
1965 if (as_chars) {
1966 debug_indent(indent + strlen(label));
1967 for (i = 0; i < block_size; ++i) {
1968 byte ch;
1969
1970 if ((i & 3) == 0 && i != 0)
1971 dputc(' ');
1972 if (block + i >= bot && block + i < top &&
1973 (ch = block[i]) >= 32 && ch <= 126
1974 )
1975 dprintf1(" %c", ch);
1976 else
1977 dputs(" ");
1978 }
1979 dputc('\n');
1980 }
1981 }
1982 #undef block_size
1983 }
1984
1985 /* Print one object with the given options. */
1986 /* Relevant options: type_addresses, no_types, pointers, pointed_strings, */
1987 /* contents. */
void
debug_print_object(const gs_memory_t *mem, const void *obj, const dump_control_t * control)
{
    /* The object header immediately precedes the client-visible object. */
    const obj_header_t *pre = ((const obj_header_t *)obj) - 1;
    ulong size = pre_obj_contents_size(pre);
    const gs_memory_struct_type_t *type = pre->o_type;
    dump_options_t options = control->options;

    dprintf3(" pre=0x%lx(obj=0x%lx) size=%lu", (ulong) pre, (ulong) obj,
	     size);
    /* The two type flags together select one of four type formats. */
    switch (options & (dump_do_type_addresses | dump_do_no_types)) {
	case dump_do_type_addresses + dump_do_no_types:	/* addresses only */
	    dprintf1(" type=0x%lx", (ulong) type);
	    break;
	case dump_do_type_addresses:	/* addresses & names */
	    dprintf2(" type=%s(0x%lx)", struct_type_name_string(type),
		     (ulong) type);
	    break;
	case 0:		/* names only */
	    dprintf1(" type=%s", struct_type_name_string(type));
	    /* falls through */
	case dump_do_no_types:	/* nothing */
	    ;
    }
    if (options & dump_do_marks) {
	dprintf2(" smark/back=%u (0x%x)", pre->o_smark, pre->o_smark);
    }
    dputc('\n');
    /* Free objects have no meaningful pointers or contents to dump. */
    if (type == &st_free)
	return;
    if (options & dump_do_pointers) {
	struct_proc_enum_ptrs((*proc)) = type->enum_ptrs;
	uint index = 0;
	enum_ptr_t eptr;
	gs_ptr_type_t ptype;

	if (proc != gs_no_struct_enum_ptrs) {
	    if (proc != 0) {
		/* Enumerate every pointer in the object via its type's */
		/* enum_ptrs procedure, printing one line per pointer.   */
		for (; (ptype = (*proc)(mem, pre + 1, size, index, &eptr, type, NULL)) != 0;
		     ++index
		     ) {
		    const void *ptr = eptr.ptr;

		    dprintf1(" ptr %u: ", index);
		    if (ptype == ptr_string_type || ptype == ptr_const_string_type) {
			/* String pointers carry both data and length. */
			const gs_const_string *str = (const gs_const_string *)&eptr;
			if (!str)
			    dprintf("0x0");
			else
			    dprintf2("0x%lx(%u)", (ulong) str->data, str->size);
			if (options & dump_do_pointed_strings) {
			    dputs(" =>\n");
			    if (!str)
				dprintf("(null)\n");
			    else
				debug_dump_contents(str->data, str->data + str->size, 6,
						    true);
			} else {
			    dputc('\n');
			}
		    } else {
			/* Parenthesize pointers into the object itself. */
			dprintf1((PTR_BETWEEN(ptr, obj, (const byte *)obj + size) ?
				  "(0x%lx)\n" : "0x%lx\n"), (ulong) ptr);
		    }
		}
	    } else {		/* proc == 0 */
		dprintf("previous line should be a ref\n");
	    }
	}			/* proc != gs_no_struct_enum_ptrs */
    }
    if (options & dump_do_contents) {
	debug_dump_contents((const byte *)obj, (const byte *)obj + size,
			    0, false);
    }
}
2062
2063 /* Print the contents of a chunk with the given options. */
2064 /* Relevant options: all. */
void
debug_dump_chunk(const gs_memory_t *mem, const chunk_t * cp, const dump_control_t * control)
{
    /* First print all the chunk header fields. */
    dprintf1("chunk at 0x%lx:\n", (ulong) cp);
    dprintf3("   chead=0x%lx  cbase=0x%lx sbase=0x%lx\n",
	     (ulong) cp->chead, (ulong) cp->cbase, (ulong) cp->sbase);
    dprintf3("    rcur=0x%lx   rtop=0x%lx  cbot=0x%lx\n",
	     (ulong) cp->rcur, (ulong) cp->rtop, (ulong) cp->cbot);
    dprintf4("    ctop=0x%lx climit=0x%lx smark=0x%lx, size=%u\n",
	     (ulong) cp->ctop, (ulong) cp->climit, (ulong) cp->smark,
	     cp->smark_size);
    dprintf2("  sreloc=0x%lx   cend=0x%lx\n",
	     (ulong) cp->sreloc, (ulong) cp->cend);
    dprintf5("cprev=0x%lx cnext=0x%lx outer=0x%lx inner_count=%u has_refs=%s\n",
	     (ulong) cp->cprev, (ulong) cp->cnext, (ulong) cp->outer,
	     cp->inner_count, (cp->has_refs ? "true" : "false"));

    dprintf2("  sfree1=0x%lx   sfree=0x%x\n",
	     (ulong) cp->sfree1, cp->sfree);
    /* Optionally dump the string area, clipped to the control bounds. */
    if (control->options & dump_do_strings) {
	debug_dump_contents((control->bottom == 0 ? cp->ctop :
			     max(control->bottom, cp->ctop)),
			    (control->top == 0 ? cp->climit :
			     min(control->top, cp->climit)),
			    0, true);
    }
    /* Then print every object in the chunk that overlaps the region. */
    SCAN_CHUNK_OBJECTS(cp)
	DO_ALL
	if (obj_in_control_region(pre + 1,
				  (const byte *)(pre + 1) + size,
				  control)
	)
	debug_print_object(mem, pre + 1, control);
    END_OBJECTS_SCAN_NO_ABORT
}
2100 void
debug_print_chunk(const gs_memory_t * mem,const chunk_t * cp)2101 debug_print_chunk(const gs_memory_t *mem, const chunk_t * cp)
2102 {
2103 dump_control_t control;
2104
2105 control = dump_control_default;
2106 debug_dump_chunk(mem, cp, &control);
2107 }
2108
2109 /* Print the contents of all chunks managed by an allocator. */
2110 /* Relevant options: all. */
2111 void
debug_dump_memory(const gs_ref_memory_t * mem,const dump_control_t * control)2112 debug_dump_memory(const gs_ref_memory_t * mem, const dump_control_t * control)
2113 {
2114 const chunk_t *mcp;
2115
2116 for (mcp = mem->cfirst; mcp != 0; mcp = mcp->cnext) {
2117 const chunk_t *cp = (mcp == mem->pcc ? &mem->cc : mcp);
2118
2119 if (obj_in_control_region(cp->cbase, cp->cend, control))
2120 debug_dump_chunk((const gs_memory_t *)mem, cp, control);
2121 }
2122 }
2123
/* Dump an allocator's chunks: strings, types, and pointers, */
/* but not raw object contents (see dump_control_no_contents). */
void
debug_dump_allocator(const gs_ref_memory_t *mem)
{
    debug_dump_memory(mem, &dump_control_no_contents);
}
2129
2130 /* Find all the objects that contain a given pointer. */
2131 void
debug_find_pointers(const gs_ref_memory_t * mem,const void * target)2132 debug_find_pointers(const gs_ref_memory_t *mem, const void *target)
2133 {
2134 dump_control_t control;
2135 const chunk_t *mcp;
2136
2137 control.options = 0;
2138 for (mcp = mem->cfirst; mcp != 0; mcp = mcp->cnext) {
2139 const chunk_t *cp = (mcp == mem->pcc ? &mem->cc : mcp);
2140
2141 SCAN_CHUNK_OBJECTS(cp);
2142 DO_ALL
2143 struct_proc_enum_ptrs((*proc)) = pre->o_type->enum_ptrs;
2144 uint index = 0;
2145 enum_ptr_t eptr;
2146
2147 if (proc) /* doesn't trace refs NB fix me. */
2148 for (; (*proc)((const gs_memory_t *)mem, pre + 1, size, index,
2149 &eptr, pre->o_type, NULL);
2150 ++index)
2151 if (eptr.ptr == target) {
2152 dprintf1("Index %d in", index);
2153 debug_print_object((const gs_memory_t *)mem, pre + 1, &control);
2154 }
2155 END_OBJECTS_SCAN_NO_ABORT
2156 }
2157 }
2158
2159 #endif /* DEBUG */
2160