1 /* Copyright (C) 2001-2019 Artifex Software, Inc.
2    All Rights Reserved.
3 
4    This software is provided AS-IS with no warranty, either express or
5    implied.
6 
7    This software is distributed under license and may not be copied,
8    modified or distributed except as expressly authorized under the terms
9    of the license contained in the file LICENSE in this distribution.
10 
11    Refer to licensing information at http://www.artifex.com or contact
12    Artifex Software, Inc.,  1305 Grant Avenue - Suite 200, Novato,
13    CA 94945, U.S.A., +1(415)492-9861, for further information.
14 */
15 
16 
17 /* Structure definitions for standard allocator */
18 /* Requires gsmemory.h, gsstruct.h */
19 
20 #ifndef gxalloc_INCLUDED
21 #  define gxalloc_INCLUDED
22 
23 #include "gsalloc.h"
24 #include "gxobj.h"
25 #include "scommon.h"
26 
27 /* ================ Clumps ================ */
28 
29 /*
30  * We obtain memory from the operating system in `clumps'.  A clump
31  * may hold only a single large object (or string), or it may hold
32  * many objects (allocated from the bottom up, always aligned)
33  * and strings (allocated from the top down, not aligned).
34  */
35 
36 /*
37  * Refs are allocated in the bottom-up section, along with struct objects.
38  * In order to keep the overhead for refs small, we make consecutive
39  * blocks of refs into a single allocator object of type st_refs.
40  * To do this, we remember the start of the current ref object (if any),
41  * and the end of the last block of allocated refs.  As long as
42  * the latter is equal to the top of the allocated area, we can add
43  * more refs to the current object; otherwise, we have to start a new one.
44  * We assume that sizeof(ref) % obj_align_mod == 0; this means that if we
45  * ever have to pad a block of refs, we never add as much as one entire ref.
46  */
47 
48 /*
49  * When we do a save, we create a new 'inner' clump out of the remaining
50  * space in the currently active clump.  Inner clumps must not be freed
51  * by a restore.
52  *
53  * The garbage collector implements relocation for refs by scanning
54  * forward to a free object.  Because of this, every ref object must end
55  * with a dummy ref that can hold the relocation for the last block.
56  * In order to put a reasonable upper bound on the scanning time, we
57  * limit the length of the objects that contain runs of refs.
58  */
/* Cap on the size of a single st_refs object; bounds the GC's forward
   scan when computing ref relocation (see comment above). */
#define max_size_st_refs (50 * sizeof(ref))
60 
61 /*
62  * Strings carry some additional overhead for use by the GC.
63  * At the top of the clump is a table of relocation values for
64  * 16N-character blocks of strings, where N is sizeof(uint).
65  * This table is aligned, by adding padding above it if necessary.
66  * Just below it is a mark table for the strings.  This table is also aligned,
67  * to improve GC performance. The actual string data start below
68  * the mark table.  These tables are not needed for a clump that holds
69  * a single large (non-string) object, but they are needed for all other
70  * clumps, including clumps created to hold a single large string.
71  */
72 
73 /*
74  * Define the unit of data manipulation for marking strings.
75  */
typedef uint string_mark_unit;

#define log2_sizeof_string_mark_unit ARCH_LOG2_SIZEOF_INT
/*
 * Define the quantum of relocation for strings, which determines
 * the quantum for reserving space.  This value must be a power of 2,
 * must be at least sizeof(string_mark_unit) * 8, and (because of the
 * unrolled loops in igcstr.c) currently must be equal to either 32 or 64.
 */
typedef uint string_reloc_offset;

/* log2 of the string data quantum: 16 * sizeof(uint) bytes per block. */
#define log2_string_data_quantum (ARCH_LOG2_SIZEOF_INT + 4)
#define string_data_quantum (1 << log2_string_data_quantum)
/*
 * Define the quantum for reserving string space, including data,
 * marks (string_data_quantum / 8 bytes, i.e. one mark bit per data
 * byte), and one relocation entry per quantum.
 */
#define string_space_quantum\
  (string_data_quantum + (string_data_quantum / 8) +\
   sizeof(string_reloc_offset))
/*
 * Compute the amount of space needed for a clump that holds only
 * a string of a given size (nbytes rounded up to whole data quanta).
 */
#define string_clump_space(nbytes)\
  (((nbytes) + (string_data_quantum - 1)) / string_data_quantum *\
   string_space_quantum)
/*
 * Compute the number of string space quanta in a given amount of storage.
 */
#define string_space_quanta(spacebytes)\
  ((spacebytes) / string_space_quantum)
/*
 * Compute the size of string marks for a given number of quanta.
 */
#define string_quanta_mark_size(nquanta)\
  ((nquanta) * (string_data_quantum / 8))
/*
 * Compute the size of the string freelists for a clump:
 * one sfree1 entry per 256 bytes of [csbase..climit).
 * NOTE(review): 'cp' is used unparenthesized inside this macro, so the
 * argument must be a simple lvalue, not an arbitrary expression.
 */
#define STRING_FREELIST_SPACE(cp)\
  (((cp->climit - csbase(cp) + 255) >> 8) * sizeof(*cp->sfree1))
118 
119 /*
120  * To allow the garbage collector to combine clumps, we store in the
121  * head of each clump the address to which its contents will be moved.
122  */
123 /*typedef struct clump_head_s clump_head_t; *//* in gxobj.h */
124 
125 /* Structure for a clump. */
typedef struct clump_s clump_t;
struct clump_s {
    clump_head_t *chead;	/* clump head, bottom of clump; */
    /* csbase is an alias for chead */
#define csbase(cp) ((byte *)(cp)->chead)
    /* Note that allocation takes place both from the bottom up */
    /* (aligned objects) and from the top down (strings). */
    byte *cbase;		/* bottom of clump data area */
    byte *int_freed_top;	/* top of most recent internal free area */
                                /* in clump (which may no longer be free), */
                                /* used to decide when to consolidate */
                                /* trailing free space in allocated area */
    byte *cbot;			/* bottom of free area */
                                /* (top of aligned objects) */
    obj_header_t *rcur;		/* current refs object, 0 if none */
    byte *rtop;			/* top of rcur */
    byte *ctop;			/* top of free area */
                                /* (bottom of strings) */
    byte *climit;		/* top of strings */
    byte *cend;			/* top of clump */
    clump_t *parent;            /* splay tree parent clump */
    clump_t *left;		/* splay tree left clump */
    clump_t *right;		/* splay tree right clump */
    clump_t *outer;		/* the clump of which this is */
                                /*   an inner clump, if any */
    uint inner_count;		/* number of clumps of which this is */
                                /*   the outer clump, if any */
    bool has_refs;		/* true if any refs in clump */
    bool c_alone;               /* this clump is for a single allocation */
    /*
     * Free lists for single bytes in blocks of 1 to 2*N-1 bytes, one per
     * 256 bytes in [csbase..climit), where N is sizeof(uint). The chain
     * pointer is a (1-byte) self-relative offset, terminated by a 0;
     * obviously, the chain is sorted by increasing address.  The free list
     * pointers themselves are offsets relative to csbase.
     *
     * Note that these lists overlay the GC relocation table, and that
     * sizeof(*sfree1) / 256 must be less than sizeof(string_reloc_offset) /
     * string_data_quantum (as real numbers).
     */
#define SFREE_NB 4		/* # of bytes for values on sfree list */
    uint *sfree1;
    /*
     * Free list for blocks of >= 2*N bytes.  Each block begins
     * with a N-byte size and a N-byte next block pointer,
     * both big-endian.  This too is sorted in increasing address order.
     */
    uint sfree;
    /* The remaining members are for the GC. */
    byte *odest;		/* destination for objects */
    byte *smark;		/* mark bits for strings */
    uint smark_size;		/* size of the smark table */
                                /* (see string_quanta_mark_size) */
    byte *sbase;		/* base for computing smark offsets */
    string_reloc_offset *sreloc;	/* relocation for string blocks */
    byte *sdest;		/* destination for (top of) strings */
    byte *rescan_bot;		/* bottom of rescanning range if */
                                /* the GC mark stack overflows */
    byte *rescan_top;		/* top of range ditto */
};
185 
/* The clump descriptor is exported only for isave.c. */
extern_st(st_clump);
/* GC descriptor for clump_t: only the three splay-tree links */
/* (left, right, parent) are enumerated/relocated as pointers. */
#define public_st_clump()	/* in ialloc.c */\
  gs_public_st_ptrs3(st_clump, clump_t, "clump_t",\
    clump_enum_ptrs, clump_reloc_ptrs, left, right, parent)
191 
192 /*
193  * Macros for scanning a clump linearly, with the following schema:
194  *      SCAN_CLUMP_OBJECTS(cp)                  << declares pre, size >>
195  *              << code for all objects -- size not set yet >>
196  *      DO_ALL
197  *              << code for all objects -- size is set >>
198  *      END_OBJECTS_SCAN
199  *
 * NB on error END_OBJECTS_SCAN prints a diagnostic in debug builds
 * (the gs_abort call is currently disabled).
201  */
/* Walk the aligned objects in [cp->cbase, cp->cbot) bottom-up. */
/* 'pre' points at the current object header; 'size' is only valid */
/* after DO_ALL has executed for the current object. */
#define SCAN_CLUMP_OBJECTS(cp)\
        {	obj_header_t *pre = (obj_header_t *)((cp)->cbase);\
                obj_header_t *end = (obj_header_t *)((cp)->cbot);\
                uint size;\
\
                for ( ; pre < end;\
                        pre = (obj_header_t *)((char *)pre + obj_size_round(size))\
                    )\
                {
/* Set 'size' from the current object header; the loop's step */
/* expression depends on it. */
#define DO_ALL\
                        size = pre_obj_contents_size(pre);\
                        {
/* Close the scan without the debug-build consistency check. */
#define END_OBJECTS_SCAN_NO_ABORT\
                        }\
                }\
        }
#ifdef DEBUG
/* Debug builds report a clump-parsing mismatch (the walk not ending */
/* exactly at cbot); the gs_abort call is currently disabled. */
#  define END_OBJECTS_SCAN\
                        }\
                }\
                if ( pre != end )\
                {	lprintf2("Clump parsing error, 0x%lx != 0x%lx\n",\
                                 (ulong)pre, (ulong)end);\
                    /*gs_abort((const gs_memory_t *)NULL);*/	\
                }\
        }
#else
#  define END_OBJECTS_SCAN END_OBJECTS_SCAN_NO_ABORT
#endif
231 
232 /* Initialize a clump. */
233 /* This is exported for save/restore. */
void alloc_init_clump(clump_t *, byte *, byte *, bool, clump_t *);

/* Initialize the string freelists in a clump. */
void alloc_init_free_strings(clump_t *);

/* Find the clump for a pointer. */
/* Note that ptr_is_within_clump returns true even if the pointer */
/* is in an inner clump of the clump being tested. */
#define ptr_is_within_clump(ptr, cp)\
  PTR_BETWEEN((const byte *)(ptr), (cp)->cbase, (cp)->cend)
#define ptr_is_in_inner_clump(ptr, cp)\
  ((cp)->inner_count != 0 &&\
   PTR_BETWEEN((const byte *)(ptr), (cp)->cbot, (cp)->ctop))
#define ptr_is_in_clump(ptr, cp)\
  (ptr_is_within_clump(ptr, cp) && !ptr_is_in_inner_clump(ptr, cp))
/* Locator: caches the most recently found clump so repeated lookups */
/* near the same address can skip the full search. */
typedef struct clump_locator_s {
    gs_ref_memory_t *memory;	/* for head & tail of chain */
    clump_t *cp;		/* one-element cache */
} clump_locator_t;
bool clump_locate_ptr(const void *, clump_locator_t *);
bool ptr_is_within_mem_clumps(const void *ptr, gs_ref_memory_t *mem);

/* Check the one-element cache before doing a full lookup. */
#define clump_locate(ptr, clp)\
  (((clp)->cp != 0 && ptr_is_in_clump(ptr, (clp)->cp)) ||\
   clump_locate_ptr(ptr, clp))

/* Close up the current clump. */
/* This is exported for save/restore and for the GC. */
void alloc_close_clump(gs_ref_memory_t * mem);

/* Reopen the current clump after a GC. */
void alloc_open_clump(gs_ref_memory_t * mem);

/* Insert or remove a clump in the address-ordered chain. */
/* These are exported for the GC. */
void alloc_link_clump(clump_t *, gs_ref_memory_t *);
void alloc_unlink_clump(clump_t *, gs_ref_memory_t *);

/* Free a clump.  This is exported for save/restore and for the GC. */
void alloc_free_clump(clump_t *, gs_ref_memory_t *);
274 
275 /* Print a clump debugging message. */
276 /* Unfortunately, the ANSI C preprocessor doesn't allow us to */
277 /* define the list of variables being printed as a macro. */
/* NOTE(review): pointers are printed via (ulong) casts, which can */
/* truncate addresses on LLP64 platforms (e.g. 64-bit Windows); this */
/* is debug output only, so correctness is unaffected. */
#define dprintf_clump_format\
  "%s 0x%lx (0x%lx..0x%lx, 0x%lx..0x%lx..0x%lx)\n"
#define dmprintf_clump(mem, msg, cp)\
  dmprintf7(mem, dprintf_clump_format,\
            msg, (ulong)(cp), (ulong)(cp)->cbase, (ulong)(cp)->cbot,\
            (ulong)(cp)->ctop, (ulong)(cp)->climit, (ulong)(cp)->cend)
#define if_debug_clump(c, mem, msg, cp)\
  if_debug7m(c, mem,dprintf_clump_format,\
             msg, (ulong)(cp), (ulong)(cp)->cbase, (ulong)(cp)->cbot,\
             (ulong)(cp)->ctop, (ulong)(cp)->climit, (ulong)(cp)->cend)
288 
289 /* ================ Allocator state ================ */
290 
291 /* Structures for save/restore (not defined here). */
292 struct alloc_save_s;
293 struct alloc_change_s;
294 
295 /*
296  * Ref (PostScript object) type, only needed for the binary_token_names
297  * member of the state.
298  */
299 typedef struct ref_s ref;
300 
301 /*
302  * Define the number of freelists.  The index in the freelist array
303  * is the ceiling of the size of the object contents (i.e., not including
304  * the header) divided by obj_align_mod. There is an extra entry used to
305  * keep a list of all free blocks > max_freelist_size.
306  */
#define max_freelist_size 800	/* big enough for gstate & contents */
/* NOTE(review): num_small_freelists includes one entry beyond the */
/* ceiling division (see comment above); num_freelists adds one more, */
/* so valid indices run 0..LARGE_FREELIST_INDEX inclusive. */
#define num_small_freelists\
  ((max_freelist_size + obj_align_mod - 1) / obj_align_mod + 1)
#define num_freelists (num_small_freelists + 1)

/*
 * Define the index of the freelist containing all free blocks >
 * max_freelist_size.
 */
#define LARGE_FREELIST_INDEX	num_small_freelists
317 
318 /* Define the memory manager subclass for this allocator. */
struct gs_ref_memory_s {
    /* The following are set at initialization time. */
    gs_memory_common;
    size_t clump_size;
    size_t large_size;		/* min size to give large object */
                                /* its own clump: must be */
                                /* 1 mod obj_align_mod */
    uint space;			/* a_local, a_global, a_system */
#   if IGC_PTR_STABILITY_CHECK
    unsigned space_id:3; /* r_space_bits + 1 bit for "instability". */
#   endif
    /* Callers can change the following dynamically */
    /* (through a procedural interface). */
    gs_memory_gc_status_t gc_status;
    /* The following are updated dynamically. */
    bool is_controlled;		/* if true, this allocator doesn't manage */
                                /* its own clumps */
    size_t limit;		/* signal a VMerror when total */
                                /* allocated exceeds this */
    clump_t *root;		/* root of clump splay tree */
    clump_t *cc;		/* current clump */
    clump_locator_t cfreed;	/* clump where last object freed */
    size_t allocated;		/* total size of all clumps */
                                /* allocated at this save level */
    size_t gc_allocated;	/* value of (allocated + */
                                /* previous_status.allocated) after last GC */
    struct lost_ {		/* space freed and 'lost' (not put on a */
                                /* freelist) */
        size_t objects;
        size_t refs;
        size_t strings;
    } lost;
    /*
     * The following are for the interpreter's convenience: the
     * library initializes them as indicated and then never touches them.
     */
    int save_level;		/* # of saves with non-zero id */
    uint new_mask;		/* l_new or 0 (default) */
    uint test_mask;		/* l_new or ~0 (default) */
    stream *streams;		/* streams allocated at current level */
    ref *names_array;		/* system_names or user_names, if needed */
    /* Garbage collector information */
    gs_gc_root_t *roots;	/* roots for GC */
    /* Sharing / saved state information */
    int num_contexts;		/* # of contexts sharing this VM */
    /* Save/restore bookkeeping; these three pointers are among the */
    /* GC-traced members (see st_ref_memory_max_ptrs below). */
    struct alloc_change_s *changes;
    struct alloc_change_s *scan_limit;
    struct alloc_save_s *saved;
    /* NOTE(review): scanning statistics, presumably consumed by GC */
    /* scheduling heuristics -- confirm against igc.c / isave.c. */
    long total_scanned;
    long total_scanned_after_compacting;
    struct alloc_save_s *reloc_saved;	/* for GC */
    gs_memory_status_t previous_status;		/* total allocated & used */
                                /* in outer save levels */
    size_t largest_free_size;	/* largest (aligned) size on large block list */
    /* We put the freelists last to keep the scalar offsets small. */
    obj_header_t *freelists[num_freelists];
};
376 
377 /* The descriptor for gs_ref_memory_t is exported only for */
378 /* the alloc_save_t subclass; otherwise, it should be private. */
379 extern_st(st_ref_memory);
380 #define public_st_ref_memory()	/* in gsalloc.c */\
381   gs_public_st_composite(st_ref_memory, gs_ref_memory_t,\
382     "gs_ref_memory", ref_memory_enum_ptrs, ref_memory_reloc_ptrs)
383 #define st_ref_memory_max_ptrs 5  /* streams, names_array, changes, scan_limit, saved */
384 
385 /* Define the procedures for the standard allocator. */
386 /* We export this for subclasses. */
387 extern const gs_memory_procs_t gs_ref_memory_procs;
388 
389 /*
390  * Scan the clumps of an allocator:
391  *      SCAN_MEM_CLUMPS(mem, cp)
392  *              << code to process clump cp >>
393  *      END_CLUMPS_SCAN
394  */
/* 'cp' is declared by the macro itself -- do not declare it at the */
/* call site.  The walk is an in-order splay-tree traversal (see */
/* clump_splay_walk_init). */
#define SCAN_MEM_CLUMPS(mem, cp)\
        {	clump_splay_walker sw;\
                clump_t *cp;\
                for (cp = clump_splay_walk_init(&sw, mem); cp != 0; cp = clump_splay_walk_fwd(&sw))\
                {
#define END_CLUMPS_SCAN\
                }\
        }
403 
404 /* ================ Debugging ================ */
405 
406 #ifdef DEBUG
407 
408 /*
409  * Define the options for a memory dump.  These may be or'ed together.
410  */
typedef enum {
    dump_do_default = 0,	/* pro forma */
    dump_do_strings = 1,
    dump_do_type_addresses = 2,
    dump_do_no_types = 4,
    dump_do_pointers = 8,
    dump_do_pointed_strings = 16,	/* only if do_pointers also set */
    dump_do_contents = 32,
    dump_do_marks = 64
} dump_options_t;

/*
 * Define all the parameters controlling what gets dumped.
 */
typedef struct dump_control_s {
    dump_options_t options;	/* or'ed combination of dump_do_* flags */
    const byte *bottom;		/* lower address bound of region to dump */
    const byte *top;		/* upper address bound of region to dump */
} dump_control_t;
430 
431 /* Define the two most useful dump control structures. */
432 extern const dump_control_t dump_control_default;
433 extern const dump_control_t dump_control_all;
434 
435 /* ------ Procedures ------ */
436 
437 /* Print one object with the given options. */
438 /* Relevant options: type_addresses, no_types, pointers, pointed_strings, */
439 /* contents. */
440 void debug_print_object(const gs_memory_t *mem, const void *obj, const dump_control_t * control);
441 
442 /* Print the contents of a clump with the given options. */
443 /* Relevant options: all. */
444 void debug_dump_clump(const gs_memory_t *mem, const clump_t * cp, const dump_control_t * control);
445 void debug_print_clump(const gs_memory_t *mem, const clump_t * cp);	/* default options */
446 
447 /* Print the contents of all clumps managed by an allocator. */
448 /* Relevant options: all. */
449 void debug_dump_memory(const gs_ref_memory_t *mem,
450                        const dump_control_t *control);
451 
452 /* convenience routine to call debug_dump_memory(), defaults
453    dump_control_t to dump_control_all.  */
454 void debug_dump_allocator(const gs_ref_memory_t *mem);
455 
456 /* Find all the objects that contain a given pointer. */
457 void debug_find_pointers(const gs_ref_memory_t *mem, const void *target);
458 
459 /* Dump the splay tree of clumps */
460 void debug_dump_clump_tree(const gs_ref_memory_t *mem);
461 
462 #endif /* DEBUG */
463 
464 /* Routines for walking/manipulating the splay tree of clumps */
/* Result returned by a clump_splay_app callback. */
typedef enum {
    SPLAY_APP_CONTINUE = 0,	/* visit the next node */
    SPLAY_APP_STOP = 1		/* stop walking (see clump_splay_app) */
} splay_app_result_t;
469 
470 /* Apply function 'fn' to every node in the clump splay tree.
471  * These are applied in depth first order, which means that
472  * 'fn' is free to perform any operation it likes on the subtree
473  * rooted at the current node, as long as it doesn't affect
474  * any nodes higher in the tree than the current node. The
475  * classic example of this is deletion, which can cause the
 * subtree to be rearranged. */
477 clump_t *clump_splay_app(clump_t *root, gs_ref_memory_t *imem, splay_app_result_t (*fn)(clump_t *, void *), void *arg);
478 
/* Bookkeeping for the iterative splay-tree traversal: records which */
/* direction the walker most recently moved. */
typedef enum
{
    /* As we step, keep track of where we just stepped from. */
    SPLAY_FROM_ABOVE = 0,
    SPLAY_FROM_LEFT = 1,
    SPLAY_FROM_RIGHT = 2
} splay_dir_t;
486 
487 /* Structure to contain details of a 'walker' for the clump
488  * splay tree. Treat this as opaque. Only defined at this
489  * public level to avoid the need for mallocs.
 * No user serviceable parts inside. */
typedef struct
{
    /* The direction we most recently moved in the
     * splay tree traversal. */
    splay_dir_t from;
    /* The current node in the splay tree traversal.
     * (The walk functions return NULL once the traversal
     * is exhausted.) */
    clump_t *cp;
    /* The node that marks the end of the splay tree traversal.
     * (NULL for top of tree). */
    clump_t *end;
} clump_splay_walker;
502 
/* Prepare a splay walker for walking forwards.
 * It will perform an in-order traversal of the entire tree,
 * starting at min and stopping after max with NULL.
 * Returns the first (minimum) clump, or NULL if the tree is empty
 * (see the SCAN_MEM_CLUMPS loop above). */
clump_t *clump_splay_walk_init(clump_splay_walker *sw, const gs_ref_memory_t *imem);
507 
508 /* Prepare a splay walker for walking forwards,
509  * starting from a point somewhere in the middle of the tree.
 * The traversal starts at cp, proceeds in-order to max, then
 * restarts at min, stopping with NULL after reaching cp again.
512  */
513 clump_t *clump_splay_walk_init_mid(clump_splay_walker *sw, clump_t *cp);
514 
515 /* Return the next node in the traversal of the clump
516  * splay tree as set up by clump_splay_walk_init{,_mid}.
517  * Will return NULL at the end point. */
518 clump_t *clump_splay_walk_fwd(clump_splay_walker *sw);
519 
520 /* Prepare a splay walker for walking backwards.
521  * It will perform a reverse-in-order traversal of the
522  * entire tree, starting at max, and stopping after min
523  * with NULL. */
524 clump_t *clump_splay_walk_bwd_init(clump_splay_walker *sw, const gs_ref_memory_t *imem);
525 
526 /* Return the next node in the traversal of the clump
527  * splay tree as set up by clump_splay_walk_bwd_init.
528  * Will return NULL at the end point. */
529 clump_t *clump_splay_walk_bwd(clump_splay_walker *sw);
530 
531 #endif /* gxalloc_INCLUDED */
532