1 /* Copyright (C) 2001-2012 Artifex Software, Inc.
2    All Rights Reserved.
3 
4    This software is provided AS-IS with no warranty, either express or
5    implied.
6 
7    This software is distributed under license and may not be copied,
8    modified or distributed except as expressly authorized under the terms
9    of the license contained in the file LICENSE in this distribution.
10 
11    Refer to licensing information at http://www.artifex.com or contact
12    Artifex Software, Inc.,  7 Mt. Lassen Drive - Suite A-134, San Rafael,
13    CA  94903, U.S.A., +1(415)492-9861, for further information.
14 */
15 
16 
17 /* C heap allocator */
18 #include "malloc_.h"
19 #include "gdebug.h"
20 #include "gserrors.h"
21 #include "gstypes.h"
22 #include "gsmemory.h"
23 #include "gsmdebug.h"
24 #include "gsstruct.h"		/* for st_bytes */
25 #include "gsmalloc.h"
26 #include "gsmemret.h"		/* retrying wrapper */
27 
28 /* ------ Heap allocator ------ */
29 
30 /*
31  * An implementation of Ghostscript's memory manager interface
32  * that works directly with the C heap.  We keep track of all allocated
33  * blocks so we can free them at cleanup time.
34  */
/* Raw memory procedures */
/* Forward declarations: one implementation per slot of the allocator
 * procedure vector (see gs_memory_procs_t in gsmemory.h). */
static gs_memory_proc_alloc_bytes(gs_heap_alloc_bytes);
static gs_memory_proc_resize_object(gs_heap_resize_object);
static gs_memory_proc_free_object(gs_heap_free_object);
static gs_memory_proc_stable(gs_heap_stable);
static gs_memory_proc_status(gs_heap_status);
static gs_memory_proc_free_all(gs_heap_free_all);

/* Object memory procedures */
static gs_memory_proc_alloc_struct(gs_heap_alloc_struct);
static gs_memory_proc_alloc_byte_array(gs_heap_alloc_byte_array);
static gs_memory_proc_alloc_struct_array(gs_heap_alloc_struct_array);
static gs_memory_proc_object_size(gs_heap_object_size);
static gs_memory_proc_object_type(gs_heap_object_type);
static gs_memory_proc_alloc_string(gs_heap_alloc_string);
static gs_memory_proc_resize_string(gs_heap_resize_string);
static gs_memory_proc_free_string(gs_heap_free_string);
static gs_memory_proc_register_root(gs_heap_register_root);
static gs_memory_proc_unregister_root(gs_heap_unregister_root);
static gs_memory_proc_enable_free(gs_heap_enable_free);
/*
 * Procedure vector for the C-heap allocator.  Entries are positional
 * (they must match gs_memory_procs_t in gsmemory.h); the doubled entries
 * appear to fill the paired movable/immovable slots with the same
 * implementation, since heap blocks never move -- verify against
 * gsmemory.h if the slot layout changes.
 */
static const gs_memory_procs_t gs_malloc_memory_procs =
{
    /* Raw memory procedures */
    gs_heap_alloc_bytes,
    gs_heap_resize_object,
    gs_heap_free_object,
    gs_heap_stable,
    gs_heap_status,
    gs_heap_free_all,
    gs_ignore_consolidate_free,	/* nothing to consolidate in the C heap */
    /* Object memory procedures */
    gs_heap_alloc_bytes,
    gs_heap_alloc_struct,
    gs_heap_alloc_struct,
    gs_heap_alloc_byte_array,
    gs_heap_alloc_byte_array,
    gs_heap_alloc_struct_array,
    gs_heap_alloc_struct_array,
    gs_heap_object_size,
    gs_heap_object_type,
    gs_heap_alloc_string,
    gs_heap_alloc_string,
    gs_heap_resize_string,
    gs_heap_free_string,
    gs_heap_register_root,
    gs_heap_unregister_root,
    gs_heap_enable_free
};
83 
/* We must make sure that malloc_blocks leave the block aligned. */
/*typedef struct gs_malloc_block_s gs_malloc_block_t; */
/*
 * Header prepended to every allocation: doubly-linked list pointers
 * (so blocks can be unlinked on free and walked in free_all), the
 * client-visible size, the object type, and the client name recorded
 * at allocation time.
 */
#define malloc_block_data\
        gs_malloc_block_t *next;\
        gs_malloc_block_t *prev;\
        uint size;\
        gs_memory_type_ptr_t type;\
        client_name_t cname
struct malloc_block_data_s {
    malloc_block_data;
};
/* Pad the header so the client area that follows it stays aligned
 * to ARCH_ALIGN_MEMORY_MOD. */
struct gs_malloc_block_s {
    malloc_block_data;
/* ANSI C does not allow zero-size arrays, so we need the following */
/* unnecessary and wasteful workaround: */
#define _npad (-size_of(struct malloc_block_data_s) & (ARCH_ALIGN_MEMORY_MOD - 1))
    byte _pad[(_npad == 0 ? ARCH_ALIGN_MEMORY_MOD : _npad)];
#undef _npad
};
103 
104 /* Initialize a malloc allocator. */
105 static long heap_available(void);
106 gs_malloc_memory_t *
gs_malloc_memory_init(void)107 gs_malloc_memory_init(void)
108 {
109     gs_malloc_memory_t *mem =
110         (gs_malloc_memory_t *)Memento_label(malloc(sizeof(gs_malloc_memory_t)), "gs_malloc_memory_t");
111 
112     if (mem == NULL)
113         return NULL;
114 
115     mem->stable_memory = 0;	/* just for tidyness, never referenced */
116     mem->procs = gs_malloc_memory_procs;
117     mem->allocated = 0;
118     mem->limit = max_long;
119     mem->used = 0;
120     mem->max_used = 0;
121     mem->gs_lib_ctx = 0;
122     mem->non_gc_memory = (gs_memory_t *)mem;
123     mem->thread_safe_memory = (gs_memory_t *)mem;	/* this allocator is thread safe */
124     /* Allocate a monitor to serialize access to structures within */
125     mem->monitor = NULL;	/* prevent use during initial allocation */
126     mem->monitor = gx_monitor_alloc((gs_memory_t *)mem);
127 
128     return mem;
129 }
130 /*
131  * Estimate the amount of available memory by probing with mallocs.
132  * We may under-estimate by a lot, but that's better than winding up with
133  * a seriously inflated address space.  This is quite a hack!
134  */
135 #define max_malloc_probes 20
136 #define malloc_probe_size 64000
137 static long
heap_available()138 heap_available()
139 {
140     long avail = 0;
141     void *probes[max_malloc_probes];
142     uint n;
143 
144     for (n = 0; n < max_malloc_probes; n++) {
145         if ((probes[n] = malloc(malloc_probe_size)) == 0)
146             break;
147         if_debug2('a', "[a]heap_available probe[%d]=0x%lx\n",
148                   n, (ulong) probes[n]);
149         avail += malloc_probe_size;
150     }
151     while (n)
152         free(probes[--n]);
153     return avail;
154 }
155 
/* Allocate various kinds of blocks. */
/*
 * Allocate 'size' bytes from the C heap, prefixed by a gs_malloc_block_t
 * header that is linked at the head of mmem->allocated.  Enforces
 * mmem->limit and maintains the used/max_used accounting.  Returns a
 * pointer to the client area (just past the header), or 0 on failure.
 */
static byte *
gs_heap_alloc_bytes(gs_memory_t * mem, uint size, client_name_t cname)
{
    gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;
    byte *ptr = 0;

#ifdef DEBUG
    const char *msg;
    static const char *const ok_msg = "OK";

#  define set_msg(str) (msg = (str))
#else
#  define set_msg(str) DO_NOTHING
#endif

        /* Exclusive access so our decisions and changes are 'atomic' */
    if (mmem->monitor)
        gx_monitor_enter(mmem->monitor);
    if (size > mmem->limit - sizeof(gs_malloc_block_t)) {
        /* Definitely too large to allocate; also avoids overflow. */
        set_msg("exceeded limit");
    } else {
        uint added = size + sizeof(gs_malloc_block_t);

        /* 'added <= size' catches wrap-around of the uint addition. */
        if (added <= size || mmem->limit - added < mmem->used)
            set_msg("exceeded limit");
        else if ((ptr = (byte *) Memento_label(malloc(added), cname)) == 0)
            set_msg("failed");
        else {
            gs_malloc_block_t *bp = (gs_malloc_block_t *) ptr;

            /*
             * We would like to check that malloc aligns blocks at least as
             * strictly as the compiler (as defined by ARCH_ALIGN_MEMORY_MOD).
             * However, Microsoft VC 6 does not satisfy this requirement.
             * See gsmemory.h for more explanation.
             */
            set_msg(ok_msg);
            /* Link the new block at the head of the allocated list. */
            if (mmem->allocated)
                mmem->allocated->prev = bp;
            bp->next = mmem->allocated;
            bp->prev = 0;
            bp->size = size;
            bp->type = &st_bytes;	/* plain bytes until a caller retags it */
            bp->cname = cname;
            mmem->allocated = bp;
            ptr = (byte *) (bp + 1);
            mmem->used += size + sizeof(gs_malloc_block_t);
            if (mmem->used > mmem->max_used)
                mmem->max_used = mmem->used;
        }
    }
    if (mmem->monitor)
        gx_monitor_leave(mmem->monitor);	/* Done with exclusive access */
    /* We don't want to 'fill' under mutex to keep the window smaller */
    if (ptr)
        gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
#ifdef DEBUG
    if (gs_debug_c('a') || msg != ok_msg)
        dlprintf6("[a+]gs_malloc(%s)(%u) = 0x%lx: %s, used=%ld, max=%ld\n",
                  client_name_string(cname), size, (ulong) ptr, msg, mmem->used, mmem->max_used);
#endif
    return ptr;
#undef set_msg
}
222 static void *
gs_heap_alloc_struct(gs_memory_t * mem,gs_memory_type_ptr_t pstype,client_name_t cname)223 gs_heap_alloc_struct(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
224                      client_name_t cname)
225 {
226     void *ptr =
227     gs_heap_alloc_bytes(mem, gs_struct_type_size(pstype), cname);
228 
229     if (ptr == 0)
230         return 0;
231     ((gs_malloc_block_t *) ptr)[-1].type = pstype;
232     return ptr;
233 }
234 static byte *
gs_heap_alloc_byte_array(gs_memory_t * mem,uint num_elements,uint elt_size,client_name_t cname)235 gs_heap_alloc_byte_array(gs_memory_t * mem, uint num_elements, uint elt_size,
236                          client_name_t cname)
237 {
238     ulong lsize = (ulong) num_elements * elt_size;
239 
240     if (lsize != (uint) lsize)
241         return 0;
242     return gs_heap_alloc_bytes(mem, (uint) lsize, cname);
243 }
244 static void *
gs_heap_alloc_struct_array(gs_memory_t * mem,uint num_elements,gs_memory_type_ptr_t pstype,client_name_t cname)245 gs_heap_alloc_struct_array(gs_memory_t * mem, uint num_elements,
246                            gs_memory_type_ptr_t pstype, client_name_t cname)
247 {
248     void *ptr =
249     gs_heap_alloc_byte_array(mem, num_elements,
250                              gs_struct_type_size(pstype), cname);
251 
252     if (ptr == 0)
253         return 0;
254     ((gs_malloc_block_t *) ptr)[-1].type = pstype;
255     return ptr;
256 }
257 static void *
gs_heap_resize_object(gs_memory_t * mem,void * obj,uint new_num_elements,client_name_t cname)258 gs_heap_resize_object(gs_memory_t * mem, void *obj, uint new_num_elements,
259                       client_name_t cname)
260 {
261     gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;
262     gs_malloc_block_t *ptr = (gs_malloc_block_t *) obj - 1;
263     gs_memory_type_ptr_t pstype = ptr->type;
264     uint old_size = gs_object_size(mem, obj) + sizeof(gs_malloc_block_t);
265     uint new_size =
266         gs_struct_type_size(pstype) * new_num_elements +
267         sizeof(gs_malloc_block_t);
268     gs_malloc_block_t *new_ptr;
269 
270     if (new_size == old_size)
271         return obj;
272     if (mmem->monitor)
273         gx_monitor_enter(mmem->monitor);	/* Exclusive access */
274     new_ptr = (gs_malloc_block_t *) gs_realloc(ptr, old_size, new_size);
275     if (new_ptr == 0)
276         return 0;
277     if (new_ptr->prev)
278         new_ptr->prev->next = new_ptr;
279     else
280         mmem->allocated = new_ptr;
281     if (new_ptr->next)
282         new_ptr->next->prev = new_ptr;
283     new_ptr->size = new_size - sizeof(gs_malloc_block_t);
284     mmem->used -= old_size;
285     mmem->used += new_size;
286     if (mmem->monitor)
287         gx_monitor_leave(mmem->monitor);	/* Done with exclusive access */
288     if (new_size > old_size)
289         gs_alloc_fill((byte *) new_ptr + old_size,
290                       gs_alloc_fill_alloc, new_size - old_size);
291     return new_ptr + 1;
292 }
293 static uint
gs_heap_object_size(gs_memory_t * mem,const void * ptr)294 gs_heap_object_size(gs_memory_t * mem, const void *ptr)
295 {
296     return ((const gs_malloc_block_t *)ptr)[-1].size;
297 }
298 static gs_memory_type_ptr_t
gs_heap_object_type(const gs_memory_t * mem,const void * ptr)299 gs_heap_object_type(const gs_memory_t * mem, const void *ptr)
300 {
301     return ((const gs_malloc_block_t *)ptr)[-1].type;
302 }
303 static void
gs_heap_free_object(gs_memory_t * mem,void * ptr,client_name_t cname)304 gs_heap_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
305 {
306     gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;
307     gs_malloc_block_t *bp;
308     gs_memory_type_ptr_t pstype;
309     struct_proc_finalize((*finalize));
310 
311     if_debug3('a', "[a-]gs_free(%s) 0x%lx(%u)\n",
312               client_name_string(cname), (ulong) ptr,
313               (ptr == 0 ? 0 : ((gs_malloc_block_t *) ptr)[-1].size));
314     if (ptr == 0)
315         return;
316     pstype = ((gs_malloc_block_t *) ptr)[-1].type;
317     finalize = pstype->finalize;
318     if (finalize != 0) {
319         if_debug3('u', "[u]finalizing %s 0x%lx (%s)\n",
320                   struct_type_name_string(pstype),
321                   (ulong) ptr, client_name_string(cname));
322         (*finalize) (mem, ptr);
323     }
324     if (mmem->monitor)
325         gx_monitor_enter(mmem->monitor);	/* Exclusive access */
326     /* Previously, we used to search through every allocated block to find
327      * the block we are freeing. This gives us safety in that an attempt to
328      * free an unallocated block will not go wrong. This does radically
329      * slow down frees though, so we replace it with this simpler code; we
330      * now assume that the block is valid, and hence avoid the search.
331      */
332 #if 1
333     bp = &((gs_malloc_block_t *)ptr)[-1];
334     if (bp->prev)
335         bp->prev->next = bp->next;
336     if (bp->next)
337         bp->next->prev = bp->prev;
338     if (bp == mmem->allocated) {
339         mmem->allocated = bp->next;
340         mmem->allocated->prev = NULL;
341     }
342     mmem->used -= bp->size + sizeof(gs_malloc_block_t);
343     if (mmem->monitor)
344         gx_monitor_leave(mmem->monitor);	/* Done with exclusive access */
345     gs_alloc_fill(bp, gs_alloc_fill_free,
346                   bp->size + sizeof(gs_malloc_block_t));
347     free(bp);
348 #else
349     bp = mmem->allocated; /* If 'finalize' releases a memory,
350                              this function could be called recursively and
351                              change mmem->allocated. */
352     if (ptr == bp + 1) {
353         mmem->allocated = bp->next;
354         mmem->used -= bp->size + sizeof(gs_malloc_block_t);
355 
356         if (mmem->allocated)
357             mmem->allocated->prev = 0;
358         if (mmem->monitor)
359             gx_monitor_leave(mmem->monitor);	/* Done with exclusive access */
360         gs_alloc_fill(bp, gs_alloc_fill_free,
361                       bp->size + sizeof(gs_malloc_block_t));
362         free(bp);
363     } else {
364         gs_malloc_block_t *np;
365 
366         /*
367          * bp == 0 at this point is an error, but we'd rather have an
368          * error message than an invalid access.
369          */
370         if (bp) {
371             for (; (np = bp->next) != 0; bp = np) {
372                 if (ptr == np + 1) {
373                     bp->next = np->next;
374                     if (np->next)
375                         np->next->prev = bp;
376                     mmem->used -= np->size + sizeof(gs_malloc_block_t);
377                     if (mmem->monitor)
378                         gx_monitor_leave(mmem->monitor);	/* Done with exclusive access */
379                     gs_alloc_fill(np, gs_alloc_fill_free,
380                                   np->size + sizeof(gs_malloc_block_t));
381                     free(np);
382                     return;
383                 }
384             }
385         }
386         if (mmem->monitor)
387             gx_monitor_leave(mmem->monitor);	/* Done with exclusive access */
388         lprintf2("%s: free 0x%lx not found!\n",
389                  client_name_string(cname), (ulong) ptr);
390         free((char *)((gs_malloc_block_t *) ptr - 1));
391     }
392 #endif
393 }
394 static byte *
gs_heap_alloc_string(gs_memory_t * mem,uint nbytes,client_name_t cname)395 gs_heap_alloc_string(gs_memory_t * mem, uint nbytes, client_name_t cname)
396 {
397     return gs_heap_alloc_bytes(mem, nbytes, cname);
398 }
399 static byte *
gs_heap_resize_string(gs_memory_t * mem,byte * data,uint old_num,uint new_num,client_name_t cname)400 gs_heap_resize_string(gs_memory_t * mem, byte * data, uint old_num, uint new_num,
401                       client_name_t cname)
402 {
403     if (gs_heap_object_type(mem, data) != &st_bytes)
404         lprintf2("%s: resizing non-string 0x%lx!\n",
405                  client_name_string(cname), (ulong) data);
406     return gs_heap_resize_object(mem, data, new_num, cname);
407 }
408 static void
gs_heap_free_string(gs_memory_t * mem,byte * data,uint nbytes,client_name_t cname)409 gs_heap_free_string(gs_memory_t * mem, byte * data, uint nbytes,
410                     client_name_t cname)
411 {
412     /****** SHOULD CHECK SIZE IF DEBUGGING ******/
413     gs_heap_free_object(mem, data, cname);
414 }
415 static int
gs_heap_register_root(gs_memory_t * mem,gs_gc_root_t * rp,gs_ptr_type_t ptype,void ** up,client_name_t cname)416 gs_heap_register_root(gs_memory_t * mem, gs_gc_root_t * rp,
417                       gs_ptr_type_t ptype, void **up, client_name_t cname)
418 {
419     return 0;
420 }
421 static void
gs_heap_unregister_root(gs_memory_t * mem,gs_gc_root_t * rp,client_name_t cname)422 gs_heap_unregister_root(gs_memory_t * mem, gs_gc_root_t * rp,
423                         client_name_t cname)
424 {
425 }
426 static gs_memory_t *
gs_heap_stable(gs_memory_t * mem)427 gs_heap_stable(gs_memory_t *mem)
428 {
429     return mem;			/* heap memory is stable */
430 }
431 
432 /*
433  * NB: In a multi-threaded application, this is only a 'snapshot'
434  *     since other threads may change the heap_status. The heap_available()
435  *     probe is just an approximation anyway.
436  */
437 static void
gs_heap_status(gs_memory_t * mem,gs_memory_status_t * pstat)438 gs_heap_status(gs_memory_t * mem, gs_memory_status_t * pstat)
439 {
440     gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;
441 
442     pstat->allocated = mmem->used + heap_available();
443     pstat->used = mmem->used;
444     pstat->is_thread_safe = true;	/* this allocator has a mutex (monitor) and IS thread safe */
445 }
446 static void
gs_heap_enable_free(gs_memory_t * mem,bool enable)447 gs_heap_enable_free(gs_memory_t * mem, bool enable)
448 {
449     if (enable)
450         mem->procs.free_object = gs_heap_free_object,
451             mem->procs.free_string = gs_heap_free_string;
452     else
453         mem->procs.free_object = gs_ignore_free_object,
454             mem->procs.free_string = gs_ignore_free_string;
455 }
456 
457 /* Release all memory acquired by this allocator. */
458 static void
gs_heap_free_all(gs_memory_t * mem,uint free_mask,client_name_t cname)459 gs_heap_free_all(gs_memory_t * mem, uint free_mask, client_name_t cname)
460 {
461     gs_malloc_memory_t *const mmem = (gs_malloc_memory_t *) mem;
462     gx_monitor_t *mon = mmem->monitor;
463 
464     /*
465      * We don't perform locking during this process since the 'monitor'
466      * is contained in this allocator, and will get freed along the way.
467      * It is only called at exit, and there better not be any threads
468      * accessing this allocator.
469      */
470     mmem->monitor = NULL; 	/* delete reference to this monitor */
471     gx_monitor_free(mon);	/* free the monitor */
472 #ifndef MEMENTO
473     /* Normally gs calls this on closedown, and it frees every block that
474      * has ever been allocated. This is not helpful for leak checking. */
475     if (free_mask & FREE_ALL_DATA) {
476         gs_malloc_block_t *bp = mmem->allocated;
477         gs_malloc_block_t *np;
478 
479         for (; bp != 0; bp = np) {
480             np = bp->next;
481             if_debug3('a', "[a]gs_heap_free_all(%s) 0x%lx(%u)\n",
482                       client_name_string(bp->cname), (ulong) (bp + 1),
483                       bp->size);
484             gs_alloc_fill(bp + 1, gs_alloc_fill_free, bp->size);
485             free(bp);
486         }
487     }
488 #endif
489     if (free_mask & FREE_ALL_ALLOCATOR)
490         free(mem);
491 }
492 
493 /* ------ Wrapping ------ */
494 
495 /* Create the retrying and the locked wrapper for the heap allocator. */
496 int
gs_malloc_wrap(gs_memory_t ** wrapped,gs_malloc_memory_t * contents)497 gs_malloc_wrap(gs_memory_t **wrapped, gs_malloc_memory_t *contents)
498 {
499 #  ifdef USE_RETRY_MEMORY_WRAPPER
500     /*
501      * This is deprecated since 'retry' for clist reversion/cycling
502      * will ONLY work for monochrome, simple PS or PCL, not for a
503      * color device and not for PDF or XPS with transparency
504      */
505     {
506         gs_memory_retrying_t *rmem;
507         rmem = (gs_memory_retrying_t *)
508             gs_alloc_bytes_immovable((gs_memory_t *)lmem,
509                                      sizeof(gs_memory_retrying_t),
510                                      "gs_malloc_wrap(retrying)");
511         if (rmem == 0) {
512             gs_memory_locked_release(lmem);
513             gs_free_object(cmem, lmem, "gs_malloc_wrap(locked)");
514             return_error(gs_error_VMerror);
515         }
516         code = gs_memory_retrying_init(rmem, (gs_memory_t *)lmem);
517         if (code < 0) {
518             gs_free_object((gs_memory_t *)lmem, rmem, "gs_malloc_wrap(retrying)");
519             gs_memory_locked_release(lmem);
520             gs_free_object(cmem, lmem, "gs_malloc_wrap(locked)");
521             return code;
522         }
523 
524         *wrapped = (gs_memory_t *)rmem;
525     }
526 #  endif /* retrying */
527     return 0;
528 }
529 
530 /* Get the wrapped contents. */
531 gs_malloc_memory_t *
gs_malloc_wrapped_contents(gs_memory_t * wrapped)532 gs_malloc_wrapped_contents(gs_memory_t *wrapped)
533 {
534 #ifdef USE_RETRY_MEMORY_WRAPPER
535     gs_memory_retrying_t *rmem = (gs_memory_retrying_t *)wrapped;
536 
537     return (gs_malloc_memory_t *)gs_memory_retrying_target(rmem);
538 #else /* retrying */
539     return (gs_malloc_memory_t *)wrapped;
540 #endif /* retrying */
541 }
542 
543 /* Free the wrapper, and return the wrapped contents. */
544 gs_malloc_memory_t *
gs_malloc_unwrap(gs_memory_t * wrapped)545 gs_malloc_unwrap(gs_memory_t *wrapped)
546 {
547 #ifdef USE_RETRY_MEMORY_WRAPPER
548     gs_memory_retrying_t *rmem = (gs_memory_retrying_t *)wrapped;
549     gs_memory_t *contents = gs_memory_retrying_target(rmem);
550 
551     gs_free_object(wrapped rmem, "gs_malloc_unwrap(retrying)");
552     return (gs_malloc_memory_t *)contents;
553 #else
554     return (gs_malloc_memory_t *)wrapped;
555 #endif
556 }
557 
558 /* Create the default allocator, and return it. */
559 gs_memory_t *
gs_malloc_init(void)560 gs_malloc_init(void)
561 {
562     gs_malloc_memory_t *malloc_memory_default = gs_malloc_memory_init();
563     gs_memory_t *memory_t_default;
564 
565     if (malloc_memory_default == NULL)
566         return NULL;
567 
568     if (gs_lib_ctx_init((gs_memory_t *)malloc_memory_default) != 0)
569         return NULL;
570 
571 #if defined(USE_RETRY_MEMORY_WRAPPER)
572     gs_malloc_wrap(&memory_t_default, malloc_memory_default);
573 #else
574     memory_t_default = (gs_memory_t *)malloc_memory_default;
575 #endif
576     memory_t_default->stable_memory = memory_t_default;
577     return memory_t_default;
578 }
579 
580 /* Release the default allocator. */
581 void
gs_malloc_release(gs_memory_t * mem)582 gs_malloc_release(gs_memory_t *mem)
583 {
584 #ifdef USE_RETRY_MEMORY_WRAPPER
585     gs_malloc_memory_t * malloc_memory_default = gs_malloc_unwrap(mem);
586 #else
587     gs_malloc_memory_t * malloc_memory_default = (gs_malloc_memory_t *)mem;
588 #endif
589 
590     gs_malloc_memory_release(malloc_memory_default);
591 }
592