/* Copyright (C) 2001-2012 Artifex Software, Inc.
   All Rights Reserved.

   This software is provided AS-IS with no warranty, either express or
   implied.

   This software is distributed under license and may not be copied,
   modified or distributed except as expressly authorized under the terms
   of the license contained in the file LICENSE in this distribution.

   Refer to licensing information at http://www.artifex.com or contact
   Artifex Software, Inc., 7 Mt. Lassen Drive - Suite A-134, San Rafael,
   CA 94903, U.S.A., +1(415)492-9861, for further information.
*/

/* Structure definitions for standard allocator */
/* Requires gsmemory.h, gsstruct.h */

#ifndef gxalloc_INCLUDED
#  define gxalloc_INCLUDED

#ifndef gs_ref_memory_DEFINED
#  define gs_ref_memory_DEFINED
typedef struct gs_ref_memory_s gs_ref_memory_t;
#endif

#include "gsalloc.h"
#include "gxobj.h"

/* ================ Chunks ================ */

/*
 * We obtain memory from the operating system in `chunks'.  A chunk
 * may hold only a single large object (or string), or it may hold
 * many objects (allocated from the bottom up, always aligned)
 * and strings (allocated from the top down, not aligned).
 */

/*
 * Refs are allocated in the bottom-up section, along with struct objects.
 * In order to keep the overhead for refs small, we make consecutive
 * blocks of refs into a single allocator object of type st_refs.
 * To do this, we remember the start of the current ref object (if any),
 * and the end of the last block of allocated refs.  As long as
 * the latter is equal to the top of the allocated area, we can add
 * more refs to the current object; otherwise, we have to start a new one.
 * We assume that sizeof(ref) % obj_align_mod == 0; this means that if we
 * ever have to pad a block of refs, we never add as much as one entire ref.
 */

/*
 * When we do a save, we create a new 'inner' chunk out of the remaining
 * space in the currently active chunk.  Inner chunks must not be freed
 * by a restore.
 *
 * The garbage collector implements relocation for refs by scanning
 * forward to a free object.  Because of this, every ref object must end
 * with a dummy ref that can hold the relocation for the last block.
 * In order to put a reasonable upper bound on the scanning time, we
 * limit the length of the objects that contain runs of refs.
 */
#define max_size_st_refs (50 * sizeof(ref))

/*
 * Strings carry some additional overhead for use by the GC.
 * At the top of the chunk is a table of relocation values for
 * 16N-character blocks of strings, where N is sizeof(uint).
 * This table is aligned, by adding padding above it if necessary.
 * Just below it is a mark table for the strings.  This table is also
 * aligned, to improve GC performance.  The actual string data start below
 * the mark table.  These tables are not needed for a chunk that holds
 * a single large (non-string) object, but they are needed for all other
 * chunks, including chunks created to hold a single large string.
 */

/*
 * Define the unit of data manipulation for marking strings.
 */
typedef uint string_mark_unit;

#define log2_sizeof_string_mark_unit arch_log2_sizeof_int
/*
 * Define the quantum of relocation for strings, which determines
 * the quantum for reserving space.  This value must be a power of 2,
 * must be at least sizeof(string_mark_unit) * 8, and (because of the
 * unrolled loops in igcstr.c) currently must be equal to either 32 or 64.
 */
typedef uint string_reloc_offset;

#define log2_string_data_quantum (arch_log2_sizeof_int + 4)
#define string_data_quantum (1 << log2_string_data_quantum)
/*
 * Define the quantum for reserving string space, including data,
 * marks, and relocation.
 */
#define string_space_quantum\
  (string_data_quantum + (string_data_quantum / 8) +\
   sizeof(string_reloc_offset))
/*
 * Compute the amount of space needed for a chunk that holds only
 * a string of a given size.
 */
#define string_chunk_space(nbytes)\
  (((nbytes) + (string_data_quantum - 1)) / string_data_quantum *\
   string_space_quantum)
/*
 * Compute the number of string space quanta in a given amount of storage.
 */
#define string_space_quanta(spacebytes)\
  ((spacebytes) / string_space_quantum)
/*
 * Compute the size of string marks for a given number of quanta.
 */
#define string_quanta_mark_size(nquanta)\
  ((nquanta) * (string_data_quantum / 8))
/*
 * Compute the size of the string freelists for a chunk.
 */
#define STRING_FREELIST_SPACE(cp)\
  (((cp->climit - csbase(cp) + 255) >> 8) * sizeof(*cp->sfree1))

/*
 * To allow the garbage collector to combine chunks, we store in the
 * head of each chunk the address to which its contents will be moved.
 */
/*typedef struct chunk_head_s chunk_head_t; */  /* in gxobj.h */

/* Structure for a chunk. */
typedef struct chunk_s chunk_t;
struct chunk_s {
    chunk_head_t *chead;        /* chunk head, bottom of chunk; */
    /* csbase is an alias for chead */
#define csbase(cp) ((byte *)(cp)->chead)
    /* Note that allocation takes place both from the bottom up */
    /* (aligned objects) and from the top down (strings). */
    byte *cbase;                /* bottom of chunk data area */
    byte *int_freed_top;        /* top of most recent internal free area */
                                /* in chunk (which may no longer be free), */
                                /* used to decide when to consolidate */
                                /* trailing free space in allocated area */
    byte *cbot;                 /* bottom of free area */
                                /* (top of aligned objects) */
    obj_header_t *rcur;         /* current refs object, 0 if none */
    byte *rtop;                 /* top of rcur */
    byte *ctop;                 /* top of free area */
                                /* (bottom of strings) */
    byte *climit;               /* top of strings */
    byte *cend;                 /* top of chunk */
    chunk_t *cprev;             /* chain chunks together, */
    chunk_t *cnext;             /*   sorted by address */
    chunk_t *outer;             /* the chunk of which this is */
                                /*   an inner chunk, if any */
    uint inner_count;           /* number of chunks of which this is */
                                /*   the outer chunk, if any */
    bool has_refs;              /* true if any refs in chunk */
    /*
     * Free lists for single bytes in blocks of 1 to 2*N-1 bytes, one per
     * 256 bytes in [csbase..climit), where N is sizeof(uint).  The chain
     * pointer is a (1-byte) self-relative offset, terminated by a 0;
     * obviously, the chain is sorted by increasing address.  The free list
     * pointers themselves are offsets relative to csbase.
     *
     * Note that these lists overlay the GC relocation table, and that
     * sizeof(*sfree1) / 256 must be less than sizeof(string_reloc_offset) /
     * string_data_quantum (as real numbers).
     */
#define SFREE_NB 4              /* # of bytes for values on sfree list */
    uint *sfree1;
    /*
     * Free list for blocks of >= 2*N bytes.  Each block begins
     * with a N-byte size and a N-byte next block pointer,
     * both big-endian.  This too is sorted in increasing address order.
     */
    uint sfree;
    /* The remaining members are for the GC. */
    byte *odest;                /* destination for objects */
    byte *smark;                /* mark bits for strings */
    uint smark_size;
    byte *sbase;                /* base for computing smark offsets */
    string_reloc_offset *sreloc;        /* relocation for string blocks */
    byte *sdest;                /* destination for (top of) strings */
    byte *rescan_bot;           /* bottom of rescanning range if */
                                /* the GC mark stack overflows */
    byte *rescan_top;           /* top of range ditto */
};

/* The chunk descriptor is exported only for isave.c. */
extern_st(st_chunk);
#define public_st_chunk()       /* in ialloc.c */\
  gs_public_st_ptrs2(st_chunk, chunk_t, "chunk_t",\
    chunk_enum_ptrs, chunk_reloc_ptrs, cprev, cnext)

/*
 * Macros for scanning a chunk linearly, with the following schema:
 *      SCAN_CHUNK_OBJECTS(cp)                  << declares pre, size >>
 *              << code for all objects -- size not set yet >>
 *      DO_ALL
 *              << code for all objects -- size is set >>
 *      END_OBJECTS_SCAN
 *
 * NB on error END_OBJECTS_SCAN calls gs_abort in debug systems.
 */
#define SCAN_CHUNK_OBJECTS(cp)\
  { obj_header_t *pre = (obj_header_t *)((cp)->cbase);\
    obj_header_t *end = (obj_header_t *)((cp)->cbot);\
    uint size;\
\
    for ( ; pre < end;\
          pre = (obj_header_t *)((char *)pre + obj_size_round(size))\
        )\
      {
#define DO_ALL\
        size = pre_obj_contents_size(pre);\
        {
#define END_OBJECTS_SCAN_NO_ABORT\
        }\
      }\
  }
#ifdef DEBUG
#  define END_OBJECTS_SCAN\
        }\
      }\
    if ( pre != end )\
      { lprintf2("Chunk parsing error, 0x%lx != 0x%lx\n",\
                 (ulong)pre, (ulong)end);\
        /*gs_abort((const gs_memory_t *)NULL);*/ \
      }\
  }
#else
#  define END_OBJECTS_SCAN END_OBJECTS_SCAN_NO_ABORT
#endif

/* Initialize a chunk. */
/* This is exported for save/restore. */
void alloc_init_chunk(chunk_t *, byte *, byte *, bool, chunk_t *);

/* Initialize the string freelists in a chunk. */
void alloc_init_free_strings(chunk_t *);

/* Find the chunk for a pointer. */
/* Note that ptr_is_within_chunk returns true even if the pointer */
/* is in an inner chunk of the chunk being tested. */
#define ptr_is_within_chunk(ptr, cp)\
  PTR_BETWEEN((const byte *)(ptr), (cp)->cbase, (cp)->cend)
#define ptr_is_in_inner_chunk(ptr, cp)\
  ((cp)->inner_count != 0 &&\
   PTR_BETWEEN((const byte *)(ptr), (cp)->cbot, (cp)->ctop))
#define ptr_is_in_chunk(ptr, cp)\
  (ptr_is_within_chunk(ptr, cp) && !ptr_is_in_inner_chunk(ptr, cp))
typedef struct chunk_locator_s {
    const gs_ref_memory_t *memory;      /* for head & tail of chain */
    chunk_t *cp;                /* one-element cache */
} chunk_locator_t;
bool chunk_locate_ptr(const void *, chunk_locator_t *);

#define chunk_locate(ptr, clp)\
  (((clp)->cp != 0 && ptr_is_in_chunk(ptr, (clp)->cp)) ||\
   chunk_locate_ptr(ptr, clp))

/* Close up the current chunk. */
/* This is exported for save/restore and for the GC. */
void alloc_close_chunk(gs_ref_memory_t * mem);

/* Reopen the current chunk after a GC. */
void alloc_open_chunk(gs_ref_memory_t * mem);

/* Insert or remove a chunk in the address-ordered chain. */
/* These are exported for the GC. */
void alloc_link_chunk(chunk_t *, gs_ref_memory_t *);
void alloc_unlink_chunk(chunk_t *, gs_ref_memory_t *);

/* Free a chunk.  This is exported for save/restore and for the GC. */
void alloc_free_chunk(chunk_t *, gs_ref_memory_t *);

/* Print a chunk debugging message. */
/* Unfortunately, the ANSI C preprocessor doesn't allow us to */
/* define the list of variables being printed as a macro. */
#define dprintf_chunk_format\
  "%s 0x%lx (0x%lx..0x%lx, 0x%lx..0x%lx..0x%lx)\n"
#define dprintf_chunk(msg, cp)\
  dprintf7(dprintf_chunk_format,\
           msg, (ulong)(cp), (ulong)(cp)->cbase, (ulong)(cp)->cbot,\
           (ulong)(cp)->ctop, (ulong)(cp)->climit, (ulong)(cp)->cend)
#define if_debug_chunk(c, msg, cp)\
  if_debug7(c, dprintf_chunk_format,\
            msg, (ulong)(cp), (ulong)(cp)->cbase, (ulong)(cp)->cbot,\
            (ulong)(cp)->ctop, (ulong)(cp)->climit, (ulong)(cp)->cend)

/* ================ Allocator state ================ */

/* Structures for save/restore (not defined here). */
struct alloc_save_s;
struct alloc_change_s;

/* Stream structure, only needed for the streams member of the state. */
#ifndef stream_DEFINED
#  define stream_DEFINED
typedef struct stream_s stream;
#endif

/*
 * Ref (PostScript object) type, only needed for the binary_token_names
 * member of the state.  This really shouldn't be visible at this level at
 * all: we include it here only to avoid splitting gs_ref_memory_t two
 * levels, which would be architecturally better but would involve too much
 * work at this point.
 */
#ifndef ref_DEFINED
typedef struct ref_s ref;
#  define ref_DEFINED
#endif

/*
 * Define the number of freelists.  The index in the freelist array
 * is the ceiling of the size of the object contents (i.e., not including
 * the header) divided by obj_align_mod.  There is an extra entry used to
 * keep a list of all free blocks > max_freelist_size.
 */
#define max_freelist_size 800   /* big enough for gstate & contents */
#define num_small_freelists\
  ((max_freelist_size + obj_align_mod - 1) / obj_align_mod + 1)
#define num_freelists (num_small_freelists + 1)

/*
 * Define the index of the freelist containing all free blocks >
 * max_freelist_size.
 */
#define LARGE_FREELIST_INDEX num_small_freelists

/* Define the memory manager subclass for this allocator. */
struct gs_ref_memory_s {
    /* The following are set at initialization time. */
    gs_memory_common;
    uint chunk_size;
    uint large_size;            /* min size to give large object */
                                /*   its own chunk: must be */
                                /*   1 mod obj_align_mod */
    uint space;                 /* a_local, a_global, a_system */
#   if IGC_PTR_STABILITY_CHECK
    unsigned space_id:3;        /* r_space_bits + 1 bit for "instability". */
#   endif
    /* Callers can change the following dynamically */
    /* (through a procedural interface). */
    gs_memory_gc_status_t gc_status;
    /* The following are updated dynamically. */
    bool is_controlled;         /* if true, this allocator doesn't manage */
                                /* its own chunks */
    ulong limit;                /* signal a VMerror when total */
                                /* allocated exceeds this */
    chunk_t *cfirst;            /* head of chunk list */
    chunk_t *clast;             /* tail of chunk list */
    chunk_t cc;                 /* current chunk */
    chunk_t *pcc;               /* where to store cc */
    chunk_locator_t cfreed;     /* chunk where last object freed */
    ulong allocated;            /* total size of all chunks */
                                /* allocated at this save level */
    ulong gc_allocated;         /* value of (allocated + */
                                /* previous_status.allocated) after last GC */
    struct lost_ {              /* space freed and 'lost' (not put on a */
                                /* freelist) */
        ulong objects;
        ulong refs;
        ulong strings;
    } lost;
    /*
     * The following are for the interpreter's convenience: the
     * library initializes them as indicated and then never touches them.
     */
    int save_level;             /* # of saves with non-zero id */
    uint new_mask;              /* l_new or 0 (default) */
    uint test_mask;             /* l_new or ~0 (default) */
    stream *streams;            /* streams allocated at current level */
    ref *names_array;           /* system_names or user_names, if needed */
    /* Garbage collector information */
    gs_gc_root_t *roots;        /* roots for GC */
    /* Sharing / saved state information */
    int num_contexts;           /* # of contexts sharing this VM */
    struct alloc_change_s *changes;
    struct alloc_change_s *scan_limit;
    struct alloc_save_s *saved;
    long total_scanned;
    long total_scanned_after_compacting;
    struct alloc_save_s *reloc_saved;   /* for GC */
    gs_memory_status_t previous_status; /* total allocated & used */
                                /* in outer save levels */
    uint largest_free_size;     /* largest (aligned) size on large block list */
    /* We put the freelists last to keep the scalar offsets small. */
    obj_header_t *freelists[num_freelists];
};

/* The descriptor for gs_ref_memory_t is exported only for */
/* the alloc_save_t subclass; otherwise, it should be private. */
extern_st(st_ref_memory);
#define public_st_ref_memory()  /* in gsalloc.c */\
  gs_public_st_composite(st_ref_memory, gs_ref_memory_t,\
    "gs_ref_memory", ref_memory_enum_ptrs, ref_memory_reloc_ptrs)
#define st_ref_memory_max_ptrs 5        /* streams, names_array, changes, scan_limit, saved */

/* Define the procedures for the standard allocator. */
/* We export this for subclasses. */
extern const gs_memory_procs_t gs_ref_memory_procs;

/*
 * Scan the chunks of an allocator:
 *      SCAN_MEM_CHUNKS(mem, cp)
 *              << code to process chunk cp >>
 *      END_CHUNKS_SCAN
 */
#define SCAN_MEM_CHUNKS(mem, cp)\
  { chunk_t *cp = (mem)->cfirst;\
    for ( ; cp != 0; cp = cp->cnext )\
      {
#define END_CHUNKS_SCAN\
      }\
  }

/* ================ Debugging ================ */

#ifdef DEBUG

/*
 * Define the options for a memory dump.  These may be or'ed together.
 */
typedef enum {
    dump_do_default = 0,        /* pro forma */
    dump_do_strings = 1,
    dump_do_type_addresses = 2,
    dump_do_no_types = 4,
    dump_do_pointers = 8,
    dump_do_pointed_strings = 16,       /* only if do_pointers also set */
    dump_do_contents = 32,
    dump_do_marks = 64
} dump_options_t;

/*
 * Define all the parameters controlling what gets dumped.
 */
typedef struct dump_control_s {
    dump_options_t options;
    const byte *bottom;
    const byte *top;
} dump_control_t;

/* Define the two most useful dump control structures. */
extern const dump_control_t dump_control_default;
extern const dump_control_t dump_control_all;

/* ------ Procedures ------ */

/* Print one object with the given options. */
/* Relevant options: type_addresses, no_types, pointers, pointed_strings, */
/* contents. */
void debug_print_object(const gs_memory_t *mem, const void *obj, const dump_control_t * control);

/* Print the contents of a chunk with the given options. */
/* Relevant options: all. */
void debug_dump_chunk(const gs_memory_t *mem, const chunk_t * cp, const dump_control_t * control);
void debug_print_chunk(const gs_memory_t *mem, const chunk_t * cp);     /* default options */

/* Print the contents of all chunks managed by an allocator. */
/* Relevant options: all. */
void debug_dump_memory(const gs_ref_memory_t *mem,
                       const dump_control_t *control);

/* Convenience routine to call debug_dump_memory(), defaulting
   the dump_control_t to dump_control_all. */
void debug_dump_allocator(const gs_ref_memory_t *mem);

/* Find all the objects that contain a given pointer. */
void debug_find_pointers(const gs_ref_memory_t *mem, const void *target);

#endif /* DEBUG */

#endif /* gxalloc_INCLUDED */