/* Copyright (C) 2001-2006 Artifex Software, Inc.
   All Rights Reserved.

   This software is provided AS-IS with no warranty, either express or
   implied.

   This software is distributed under license and may not be copied, modified
   or distributed except as expressly authorized under the terms of that
   license.  Refer to licensing information at http://www.artifex.com/
   or contact Artifex Software, Inc.,  7 Mt. Lassen Drive - Suite A-134,
   San Rafael, CA  94903, U.S.A., +1(415)492-9861, for further information.
*/

/* $Id: gsmalloc.c 10005 2009-08-18 20:41:17Z ray $ */
/* C heap allocator */
#include "malloc_.h"
#include "gdebug.h"
#include "gserror.h"
#include "gserrors.h"
#include "gstypes.h"
#include "gsmemory.h"
#include "gsmdebug.h"
#include "gsstruct.h"		/* for st_bytes */
#include "gsmalloc.h"
#include "gsmemret.h"		/* retrying wrapper */

/* ------ Heap allocator ------ */

/*
 * An implementation of Ghostscript's memory manager interface
 * that works directly with the C heap.  We keep track of all allocated
 * blocks so we can free them at cleanup time.
 */
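/*
 * A minimal usage sketch (illustrative only, not code in this file): a
 * client creates the allocator, allocates through the generic gs_memory_t
 * interface, and releases everything at shutdown.  The gs_alloc_bytes()
 * and gs_free_object() macros are assumed to come from gsmemory.h; error
 * handling is elided.
 *
 *	gs_malloc_memory_t *heap = gs_malloc_memory_init();
 *	gs_memory_t *mem = (gs_memory_t *)heap;
 *	byte *buf = gs_alloc_bytes(mem, 1024, "example buffer");
 *	...
 *	gs_free_object(mem, buf, "example buffer");
 *	gs_malloc_memory_release(heap);
 *
 * gs_malloc_memory_release() (declared in gsmalloc.h) frees any blocks
 * still outstanding, which is what the per-block bookkeeping below exists
 * to support.
 */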
/* Raw memory procedures */
static gs_memory_proc_alloc_bytes(gs_heap_alloc_bytes);
static gs_memory_proc_resize_object(gs_heap_resize_object);
static gs_memory_proc_free_object(gs_heap_free_object);
static gs_memory_proc_stable(gs_heap_stable);
static gs_memory_proc_status(gs_heap_status);
static gs_memory_proc_free_all(gs_heap_free_all);

/* Object memory procedures */
static gs_memory_proc_alloc_struct(gs_heap_alloc_struct);
static gs_memory_proc_alloc_byte_array(gs_heap_alloc_byte_array);
static gs_memory_proc_alloc_struct_array(gs_heap_alloc_struct_array);
static gs_memory_proc_object_size(gs_heap_object_size);
static gs_memory_proc_object_type(gs_heap_object_type);
static gs_memory_proc_alloc_string(gs_heap_alloc_string);
static gs_memory_proc_resize_string(gs_heap_resize_string);
static gs_memory_proc_free_string(gs_heap_free_string);
static gs_memory_proc_register_root(gs_heap_register_root);
static gs_memory_proc_unregister_root(gs_heap_unregister_root);
static gs_memory_proc_enable_free(gs_heap_enable_free);
static const gs_memory_procs_t gs_malloc_memory_procs =
{
    /* Raw memory procedures */
    gs_heap_alloc_bytes,
    gs_heap_resize_object,
    gs_heap_free_object,
    gs_heap_stable,
    gs_heap_status,
    gs_heap_free_all,
    gs_ignore_consolidate_free,
    /* Object memory procedures */
    gs_heap_alloc_bytes,
    gs_heap_alloc_struct,
    gs_heap_alloc_struct,
    gs_heap_alloc_byte_array,
    gs_heap_alloc_byte_array,
    gs_heap_alloc_struct_array,
    gs_heap_alloc_struct_array,
    gs_heap_object_size,
    gs_heap_object_type,
    gs_heap_alloc_string,
    gs_heap_alloc_string,
    gs_heap_resize_string,
    gs_heap_free_string,
    gs_heap_register_root,
    gs_heap_unregister_root,
    gs_heap_enable_free
};

/* We must make sure that malloc_blocks leave the block aligned. */
/*typedef struct gs_malloc_block_s gs_malloc_block_t; */
#define malloc_block_data\
	gs_malloc_block_t *next;\
	gs_malloc_block_t *prev;\
	uint size;\
	gs_memory_type_ptr_t type;\
	client_name_t cname
struct malloc_block_data_s {
    malloc_block_data;
};
struct gs_malloc_block_s {
    malloc_block_data;
/* ANSI C does not allow zero-size arrays, so we need the following */
/* unnecessary and wasteful workaround: */
#define _npad (-size_of(struct malloc_block_data_s) & (ARCH_ALIGN_MEMORY_MOD - 1))
    byte _pad[(_npad == 0 ? ARCH_ALIGN_MEMORY_MOD : _npad)];
#undef _npad
};
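
/*
 * Layout note (informational): each allocation is a single malloc'ed chunk
 * consisting of a gs_malloc_block_t header followed immediately by the
 * client data, so the client pointer is (byte *)(bp + 1) and the header is
 * recovered as ((gs_malloc_block_t *)ptr)[-1].
 *
 *	+--------------------------+---------------------------+
 *	| gs_malloc_block_t header | size bytes of client data |
 *	+--------------------------+---------------------------+
 *	bp                         ptr = (byte *)(bp + 1)
 */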

/* Initialize a malloc allocator. */
static long heap_available(void);
gs_malloc_memory_t *
gs_malloc_memory_init(void)
{
    gs_malloc_memory_t *mem =
	(gs_malloc_memory_t *)malloc(sizeof(gs_malloc_memory_t));

    if (mem == NULL)
	return NULL;		/* malloc failed; nothing to initialize */
    mem->stable_memory = 0;	/* just for tidiness, never referenced */
    mem->procs = gs_malloc_memory_procs;
    mem->allocated = 0;
    mem->limit = max_long;
    mem->used = 0;
    mem->max_used = 0;
    mem->gs_lib_ctx = 0;
    mem->non_gc_memory = (gs_memory_t *)mem;
    /* Allocate a monitor to serialize access to structures within */
    mem->monitor = NULL;	/* prevent use during initial allocation */
    mem->monitor = gx_monitor_alloc((gs_memory_t *)mem);

    return mem;
}
/*
 * Estimate the amount of available memory by probing with mallocs.
 * We may under-estimate by a lot, but that's better than winding up with
 * a seriously inflated address space.  This is quite a hack!
 */
#define max_malloc_probes 20
#define malloc_probe_size 64000
static long
heap_available(void)
{
    long avail = 0;
    void *probes[max_malloc_probes];
    uint n;

    for (n = 0; n < max_malloc_probes; n++) {
	if ((probes[n] = malloc(malloc_probe_size)) == 0)
	    break;
	if_debug2('a', "[a]heap_available probe[%d]=0x%lx\n",
		  n, (ulong) probes[n]);
	avail += malloc_probe_size;
    }
    while (n)
	free(probes[--n]);
    return avail;
}

/* Allocate various kinds of blocks. */
static byte *
gs_heap_alloc_bytes(gs_memory_t * mem, uint size, client_name_t cname)
{
    gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;
    byte *ptr = 0;

#ifdef DEBUG
    const char *msg;
    static const char *const ok_msg = "OK";

#  define set_msg(str) (msg = (str))
#else
#  define set_msg(str) DO_NOTHING
#endif

    /* Exclusive access so our decisions and changes are 'atomic'. */
    if (mmem->monitor)
	gx_monitor_enter(mmem->monitor);
    if (size > mmem->limit - sizeof(gs_malloc_block_t)) {
	/* Definitely too large to allocate; also avoids overflow. */
	set_msg("exceeded limit");
    } else {
	uint added = size + sizeof(gs_malloc_block_t);

	if (added <= size || mmem->limit - added < mmem->used)
	    set_msg("exceeded limit");
	else if ((ptr = (byte *) malloc(added)) == 0)
	    set_msg("failed");
	else {
	    gs_malloc_block_t *bp = (gs_malloc_block_t *) ptr;

	    /*
	     * We would like to check that malloc aligns blocks at least as
	     * strictly as the compiler (as defined by ARCH_ALIGN_MEMORY_MOD).
	     * However, Microsoft VC 6 does not satisfy this requirement.
	     * See gsmemory.h for more explanation.
	     */
	    set_msg(ok_msg);
	    if (mmem->allocated)
		mmem->allocated->prev = bp;
	    bp->next = mmem->allocated;
	    bp->prev = 0;
	    bp->size = size;
	    bp->type = &st_bytes;
	    bp->cname = cname;
	    mmem->allocated = bp;
	    ptr = (byte *) (bp + 1);
	    mmem->used += size + sizeof(gs_malloc_block_t);
	    if (mmem->used > mmem->max_used)
		mmem->max_used = mmem->used;
	}
    }
    if (mmem->monitor)
	gx_monitor_leave(mmem->monitor);	/* Done with exclusive access */
    /* Fill outside the mutex to keep the critical section small. */
    if (ptr)
	gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
#ifdef DEBUG
    if (gs_debug_c('a') || msg != ok_msg)
	dlprintf6("[a+]gs_malloc(%s)(%u) = 0x%lx: %s, used=%ld, max=%ld\n",
		  client_name_string(cname), size, (ulong) ptr, msg,
		  mmem->used, mmem->max_used);
#endif
    return ptr;
#undef set_msg
}
static void *
gs_heap_alloc_struct(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
		     client_name_t cname)
{
    void *ptr =
	gs_heap_alloc_bytes(mem, gs_struct_type_size(pstype), cname);

    if (ptr == 0)
	return 0;
    ((gs_malloc_block_t *) ptr)[-1].type = pstype;
    return ptr;
}
static byte *
gs_heap_alloc_byte_array(gs_memory_t * mem, uint num_elements, uint elt_size,
			 client_name_t cname)
{
    ulong lsize = (ulong) num_elements * elt_size;

    if (lsize != (uint) lsize)
	return 0;
    return gs_heap_alloc_bytes(mem, (uint) lsize, cname);
}
static void *
gs_heap_alloc_struct_array(gs_memory_t * mem, uint num_elements,
			   gs_memory_type_ptr_t pstype, client_name_t cname)
{
    void *ptr =
	gs_heap_alloc_byte_array(mem, num_elements,
				 gs_struct_type_size(pstype), cname);

    if (ptr == 0)
	return 0;
    ((gs_malloc_block_t *) ptr)[-1].type = pstype;
    return ptr;
}
static void *
gs_heap_resize_object(gs_memory_t * mem, void *obj, uint new_num_elements,
		      client_name_t cname)
{
    gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;
    gs_malloc_block_t *ptr = (gs_malloc_block_t *) obj - 1;
    gs_memory_type_ptr_t pstype = ptr->type;
    uint old_size = gs_object_size(mem, obj) + sizeof(gs_malloc_block_t);
    uint new_size =
	gs_struct_type_size(pstype) * new_num_elements +
	sizeof(gs_malloc_block_t);
    gs_malloc_block_t *new_ptr;

    if (new_size == old_size)
	return obj;
    if (mmem->monitor)
	gx_monitor_enter(mmem->monitor);	/* Exclusive access */
    new_ptr = (gs_malloc_block_t *) gs_realloc(ptr, old_size, new_size);
    if (new_ptr == 0) {
	/* Don't return while still holding the monitor. */
	if (mmem->monitor)
	    gx_monitor_leave(mmem->monitor);	/* Done with exclusive access */
	return 0;
    }
    if (new_ptr->prev)
	new_ptr->prev->next = new_ptr;
    else
	mmem->allocated = new_ptr;
    if (new_ptr->next)
	new_ptr->next->prev = new_ptr;
    new_ptr->size = new_size - sizeof(gs_malloc_block_t);
    mmem->used -= old_size;
    mmem->used += new_size;
    if (mmem->monitor)
	gx_monitor_leave(mmem->monitor);	/* Done with exclusive access */
    if (new_size > old_size)
	gs_alloc_fill((byte *) new_ptr + old_size,
		      gs_alloc_fill_alloc, new_size - old_size);
    return new_ptr + 1;
}
static uint
gs_heap_object_size(gs_memory_t * mem, const void *ptr)
{
    return ((const gs_malloc_block_t *)ptr)[-1].size;
}
static gs_memory_type_ptr_t
gs_heap_object_type(const gs_memory_t * mem, const void *ptr)
{
    return ((const gs_malloc_block_t *)ptr)[-1].type;
}
static void
gs_heap_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
{
    gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;
    gs_malloc_block_t *bp;
    gs_memory_type_ptr_t pstype;
    struct_proc_finalize((*finalize));

    if_debug3('a', "[a-]gs_free(%s) 0x%lx(%u)\n",
	      client_name_string(cname), (ulong) ptr,
	      (ptr == 0 ? 0 : ((gs_malloc_block_t *) ptr)[-1].size));
    if (ptr == 0)
	return;
    pstype = ((gs_malloc_block_t *) ptr)[-1].type;
    finalize = pstype->finalize;
    if (finalize != 0) {
	if_debug3('u', "[u]finalizing %s 0x%lx (%s)\n",
		  struct_type_name_string(pstype),
		  (ulong) ptr, client_name_string(cname));
	(*finalize) (ptr);
    }
    if (mmem->monitor)
	gx_monitor_enter(mmem->monitor);	/* Exclusive access */
    bp = mmem->allocated;	/* If 'finalize' releases memory, this
				   function can be entered recursively and
				   change mmem->allocated, so don't read it
				   until after finalization. */
    if (ptr == bp + 1) {
	mmem->allocated = bp->next;
	mmem->used -= bp->size + sizeof(gs_malloc_block_t);

	if (mmem->allocated)
	    mmem->allocated->prev = 0;
	if (mmem->monitor)
	    gx_monitor_leave(mmem->monitor);	/* Done with exclusive access */
	gs_alloc_fill(bp, gs_alloc_fill_free,
		      bp->size + sizeof(gs_malloc_block_t));
	free(bp);
    } else {
	gs_malloc_block_t *np;

	/*
	 * bp == 0 at this point is an error, but we'd rather have an
	 * error message than an invalid access.
	 */
	if (bp) {
	    for (; (np = bp->next) != 0; bp = np) {
		if (ptr == np + 1) {
		    bp->next = np->next;
		    if (np->next)
			np->next->prev = bp;
		    mmem->used -= np->size + sizeof(gs_malloc_block_t);
		    if (mmem->monitor)
			gx_monitor_leave(mmem->monitor);	/* Done with exclusive access */
		    gs_alloc_fill(np, gs_alloc_fill_free,
				  np->size + sizeof(gs_malloc_block_t));
		    free(np);
		    return;
		}
	    }
	}
	if (mmem->monitor)
	    gx_monitor_leave(mmem->monitor);	/* Done with exclusive access */
	lprintf2("%s: free 0x%lx not found!\n",
		 client_name_string(cname), (ulong) ptr);
	free((char *)((gs_malloc_block_t *) ptr - 1));
    }
}
static byte *
gs_heap_alloc_string(gs_memory_t * mem, uint nbytes, client_name_t cname)
{
    return gs_heap_alloc_bytes(mem, nbytes, cname);
}
static byte *
gs_heap_resize_string(gs_memory_t * mem, byte * data, uint old_num, uint new_num,
		      client_name_t cname)
{
    if (gs_heap_object_type(mem, data) != &st_bytes)
	lprintf2("%s: resizing non-string 0x%lx!\n",
		 client_name_string(cname), (ulong) data);
    return gs_heap_resize_object(mem, data, new_num, cname);
}
static void
gs_heap_free_string(gs_memory_t * mem, byte * data, uint nbytes,
		    client_name_t cname)
{
    /****** SHOULD CHECK SIZE IF DEBUGGING ******/
    gs_heap_free_object(mem, data, cname);
}
static int
gs_heap_register_root(gs_memory_t * mem, gs_gc_root_t * rp,
		      gs_ptr_type_t ptype, void **up, client_name_t cname)
{
    return 0;
}
static void
gs_heap_unregister_root(gs_memory_t * mem, gs_gc_root_t * rp,
			client_name_t cname)
{
}
static gs_memory_t *
gs_heap_stable(gs_memory_t *mem)
{
    return mem;			/* heap memory is stable */
}

/*
 * NB: In a multi-threaded application this is only a snapshot, since other
 *     threads may allocate or free concurrently.  The heap_available()
 *     probe is just an approximation anyway.
 */
static void
gs_heap_status(gs_memory_t * mem, gs_memory_status_t * pstat)
{
    gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;

    pstat->allocated = mmem->used + heap_available();
    pstat->used = mmem->used;
}
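
/*
 * Illustrative only (assumes the gs_memory_status() macro from gsmemory.h
 * and a debug build for dprintf2): sampling the allocator's usage.  The
 * 'allocated' figure includes the heap_available() probe, so treat it as a
 * rough estimate rather than an exact capacity.
 *
 *	gs_memory_status_t stat;
 *
 *	gs_memory_status((gs_memory_t *)heap, &stat);
 *	dprintf2("heap used=%lu allocated=%lu\n", stat.used, stat.allocated);
 */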
static void
gs_heap_enable_free(gs_memory_t * mem, bool enable)
{
    if (enable)
	mem->procs.free_object = gs_heap_free_object,
	    mem->procs.free_string = gs_heap_free_string;
    else
	mem->procs.free_object = gs_ignore_free_object,
	    mem->procs.free_string = gs_ignore_free_string;
}

/* Release all memory acquired by this allocator. */
static void
gs_heap_free_all(gs_memory_t * mem, uint free_mask, client_name_t cname)
{
    gs_malloc_memory_t *const mmem = (gs_malloc_memory_t *) mem;
    gx_monitor_t *mon = mmem->monitor;

    /*
     * We don't perform locking during this process, since the monitor is
     * contained in this allocator and will be freed along the way.  This
     * is only called at exit, when no other threads should be accessing
     * this allocator.
     */
    mmem->monitor = NULL; 	/* delete reference to this monitor */
    gx_monitor_free(mon);	/* free the monitor */
    if (free_mask & FREE_ALL_DATA) {
	gs_malloc_block_t *bp = mmem->allocated;
	gs_malloc_block_t *np;

	for (; bp != 0; bp = np) {
	    np = bp->next;
	    if_debug3('a', "[a]gs_heap_free_all(%s) 0x%lx(%u)\n",
		      client_name_string(bp->cname), (ulong) (bp + 1),
		      bp->size);
	    gs_alloc_fill(bp + 1, gs_alloc_fill_free, bp->size);
	    free(bp);
	}
    }
    if (free_mask & FREE_ALL_ALLOCATOR)
	free(mem);
}

/* ------ Wrapping ------ */

/* Create the retrying wrapper for the heap allocator. */
int
gs_malloc_wrap(gs_memory_t **wrapped, gs_malloc_memory_t *contents)
{
#  ifdef USE_RETRY_MEMORY_WRAPPER
    /*
     * This is deprecated, since 'retry' for clist reversion/cycling
     * will ONLY work for monochrome, simple PS or PCL, not for a
     * color device and not for PDF or XPS with transparency.
     */
    {
	gs_memory_t *cmem = (gs_memory_t *)contents;
	gs_memory_retrying_t *rmem;
	int code;

	rmem = (gs_memory_retrying_t *)
	    gs_alloc_bytes_immovable(cmem,
				     sizeof(gs_memory_retrying_t),
				     "gs_malloc_wrap(retrying)");
	if (rmem == 0)
	    return_error(gs_error_VMerror);
	code = gs_memory_retrying_init(rmem, cmem);
	if (code < 0) {
	    gs_free_object(cmem, rmem, "gs_malloc_wrap(retrying)");
	    return code;
	}

	*wrapped = (gs_memory_t *)rmem;
    }
#  endif /* retrying */
    return 0;
}

/* Get the wrapped contents. */
gs_malloc_memory_t *
gs_malloc_wrapped_contents(gs_memory_t *wrapped)
{
#ifdef USE_RETRY_MEMORY_WRAPPER
    gs_memory_retrying_t *rmem = (gs_memory_retrying_t *)wrapped;

    return (gs_malloc_memory_t *)gs_memory_retrying_target(rmem);
#else /* retrying */
    return (gs_malloc_memory_t *)wrapped;
#endif /* retrying */
}

/* Free the wrapper, and return the wrapped contents. */
gs_malloc_memory_t *
gs_malloc_unwrap(gs_memory_t *wrapped)
{
#ifdef USE_RETRY_MEMORY_WRAPPER
    gs_memory_retrying_t *rmem = (gs_memory_retrying_t *)wrapped;
    gs_memory_t *contents = gs_memory_retrying_target(rmem);

    gs_free_object(wrapped, rmem, "gs_malloc_unwrap(retrying)");
    return (gs_malloc_memory_t *)contents;
#else
    return (gs_malloc_memory_t *)wrapped;
#endif
}

/* Create the default allocator, and return it. */
gs_memory_t *
gs_malloc_init(const gs_memory_t *parent)
{
    gs_malloc_memory_t *malloc_memory_default = gs_malloc_memory_init();
    gs_memory_t *memory_t_default;

    if (parent)
	malloc_memory_default->gs_lib_ctx = parent->gs_lib_ctx;
    else
	gs_lib_ctx_init((gs_memory_t *)malloc_memory_default);

#if defined(USE_RETRY_MEMORY_WRAPPER)
    gs_malloc_wrap(&memory_t_default, malloc_memory_default);
#else
    memory_t_default = (gs_memory_t *)malloc_memory_default;
#endif
    memory_t_default->stable_memory = memory_t_default;
    return memory_t_default;
}
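
/*
 * Typical top-level use (a sketch, not code compiled here):
 * gs_malloc_init() builds the default allocator, optionally wrapped for
 * retrying, and gs_malloc_release() tears it down again.
 *
 *	gs_memory_t *mem = gs_malloc_init(NULL);
 *
 *	... allocate and free through mem ...
 *
 *	gs_malloc_release(mem);
 */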

/* Release the default allocator. */
void
gs_malloc_release(gs_memory_t *mem)
{
#ifdef USE_RETRY_MEMORY_WRAPPER
    gs_malloc_memory_t *malloc_memory_default = gs_malloc_unwrap(mem);
#else
    gs_malloc_memory_t *malloc_memory_default = (gs_malloc_memory_t *)mem;
#endif

    gs_malloc_memory_release(malloc_memory_default);
}