1 /* Copyright (C) 2001-2006 Artifex Software, Inc.
2    All Rights Reserved.
3 
4    This software is provided AS-IS with no warranty, either express or
5    implied.
6 
7    This software is distributed under license and may not be copied, modified
8    or distributed except as expressly authorized under the terms of that
9    license.  Refer to licensing information at http://www.artifex.com/
10    or contact Artifex Software, Inc.,  7 Mt. Lassen Drive - Suite A-134,
11    San Rafael, CA  94903, U.S.A., +1(415)492-9861, for further information.
12 */
13 
14 /* $Id: istack.c 9043 2008-08-28 22:48:19Z giles $ */
15 /* Manager for expandable stacks of refs */
16 #include "memory_.h"
17 #include "ghost.h"
18 #include "gsstruct.h"
19 #include "gsutil.h"
20 #include "ierrors.h"
21 #include "ialloc.h"
22 #include "istack.h"
23 #include "istkparm.h"
24 #include "istruct.h"		/* for RELOC_REF_VAR */
25 #include "iutil.h"
26 #include "ivmspace.h"		/* for local/global test */
27 #include "store.h"
28 
29 /* Forward references */
30 static void init_block(ref_stack_t *pstack, const ref *pblock_array,
31 			uint used);
32 static int ref_stack_push_block(ref_stack_t *pstack, uint keep, uint add);
33 
34 /* GC descriptors and procedures */
35 private_st_ref_stack_params();
36 static
CLEAR_MARKS_PROC(ref_stack_clear_marks)37 CLEAR_MARKS_PROC(ref_stack_clear_marks)
38 {
39     ref_stack_t *const sptr = vptr;
40 
41     r_clear_attrs(&sptr->current, l_mark);
42 }
43 static
44 ENUM_PTRS_WITH(ref_stack_enum_ptrs, ref_stack_t *sptr) return 0;
45 case 0: ENUM_RETURN_REF(&sptr->current);
46 case 1: return ENUM_OBJ(sptr->params);
47 ENUM_PTRS_END
/*
 * GC relocation procedure: after the GC moves the block array, adjust the
 * cached p/bot/top pointers by the same displacement as the block refs.
 * (Reconstructed definition line; the extracted source had the signature
 * duplicated with a fused line number.)
 */
static RELOC_PTRS_WITH(ref_stack_reloc_ptrs, ref_stack_t *sptr)
{
    /* Note that the relocation must be a multiple of sizeof(ref_packed) */
    /* * align_packed_per_ref, but it need not be a multiple of */
    /* sizeof(ref).  Therefore, we must do the adjustments using */
    /* ref_packed pointers rather than ref pointers. */
    ref_packed *bot = (ref_packed *) sptr->current.value.refs;
    long reloc;

    RELOC_REF_VAR(sptr->current);
    r_clear_attrs(&sptr->current, l_mark);
    /* Displacement (in ref_packed units) between old and new addresses. */
    reloc = bot - (ref_packed *) sptr->current.value.refs;
#define RELOC_P(p)\
  sptr->p = (ref *)((ref_packed *)sptr->p - reloc);
    RELOC_P(p);
    RELOC_P(bot);
    RELOC_P(top);
#undef RELOC_P
    RELOC_OBJ_VAR(sptr->params);
} RELOC_PTRS_END
68 /* Structure type for a ref_stack. */
69 public_st_ref_stack();
70 
71 /* Initialize a stack. */
72 int
ref_stack_init(ref_stack_t * pstack,const ref * pblock_array,uint bot_guard,uint top_guard,const ref * pguard_value,gs_ref_memory_t * mem,ref_stack_params_t * params)73 ref_stack_init(ref_stack_t *pstack, const ref *pblock_array,
74 	       uint bot_guard, uint top_guard, const ref *pguard_value,
75 	       gs_ref_memory_t *mem, ref_stack_params_t *params)
76 {
77     uint size = r_size(pblock_array);
78     uint avail = size - (stack_block_refs + bot_guard + top_guard);
79     ref_stack_block *pblock = (ref_stack_block *)pblock_array->value.refs;
80     s_ptr body = (s_ptr)(pblock + 1);
81 
82     if (params == 0) {
83 	params = gs_alloc_struct((gs_memory_t *)mem, ref_stack_params_t,
84 				 &st_ref_stack_params,
85 				 "ref_stack_alloc(stack.params)");
86 	if (params == 0)
87 	    return_error(-1);	/* avoid binding in any error codes */
88     }
89 
90     pstack->bot = body + bot_guard;
91     pstack->p = pstack->bot - 1;
92     pstack->top = pstack->p + avail;
93     pstack->current = *pblock_array;
94     pstack->extension_size = 0;
95     pstack->extension_used = 0;
96 
97     make_int(&pstack->max_stack, avail);
98     pstack->requested = 0;
99     pstack->margin = 0;
100     pstack->body_size = avail;
101 
102     pstack->params = params;
103     pstack->memory = mem;
104 
105     params->bot_guard = bot_guard;
106     params->top_guard = top_guard;
107     params->block_size = size;
108     params->data_size = avail;
109     if (pguard_value != 0)
110 	params->guard_value = *pguard_value;
111     else
112 	make_tav(&params->guard_value, t__invalid, 0, intval, 0);
113     params->underflow_error = -1;
114     params->overflow_error = -1;
115     params->allow_expansion = true;
116     init_block(pstack, pblock_array, 0);
117     refset_null_new(pstack->bot, avail, 0);
118     make_empty_array(&pblock->next, 0);
119     return 0;
120 }
121 
122 /* Set whether a stack is allowed to expand.  The initial value is true. */
123 void
ref_stack_allow_expansion(ref_stack_t * pstack,bool expand)124 ref_stack_allow_expansion(ref_stack_t *pstack, bool expand)
125 {
126     pstack->params->allow_expansion = expand;
127 }
128 
129 /* Set the error codes for under- and overflow.  The initial values are -1. */
130 void
ref_stack_set_error_codes(ref_stack_t * pstack,int underflow_error,int overflow_error)131 ref_stack_set_error_codes(ref_stack_t *pstack, int underflow_error,
132 			  int overflow_error)
133 {
134     pstack->params->underflow_error = underflow_error;
135     pstack->params->overflow_error = overflow_error;
136 }
137 
138 /* Set the maximum number of elements allowed on a stack. */
139 int
ref_stack_set_max_count(ref_stack_t * pstack,long nmax)140 ref_stack_set_max_count(ref_stack_t *pstack, long nmax)
141 {
142     uint nmin = ref_stack_count_inline(pstack);
143 
144     if (nmax < nmin)
145 	nmax = nmin;
146     if (nmax > max_uint / sizeof(ref))
147 	nmax = max_uint / sizeof(ref);
148     if (!pstack->params->allow_expansion) {
149 	uint ncur = pstack->body_size;
150 
151 	if (nmax > ncur)
152 	    nmax = ncur;
153     }
154     pstack->max_stack.value.intval = nmax;
155     return 0;
156 }
157 
158 /*
159  * Set the margin between the limit and the top of the stack.
160  * Note that this may require allocating a block.
161  */
162 int
ref_stack_set_margin(ref_stack_t * pstack,uint margin)163 ref_stack_set_margin(ref_stack_t *pstack, uint margin)
164 {
165     const ref_stack_params_t *params = pstack->params;
166     uint data_size = params->data_size;
167 
168     if (margin <= pstack->margin) {
169 	refset_null_new(pstack->top + 1, pstack->margin - margin, 0);
170     } else {
171 	if (margin > data_size >> 1)
172 	    return_error(e_rangecheck);
173 	if (pstack->top - pstack->p < margin) {
174 	    uint used = pstack->p + 1 - pstack->bot;
175 	    uint keep = data_size - margin;
176 	    int code = ref_stack_push_block(pstack, keep, used - keep);
177 
178 	    if (code < 0)
179 		return code;
180 	}
181     }
182     pstack->margin = margin;
183     pstack->body_size = data_size - margin;
184     pstack->top = pstack->bot + pstack->body_size - 1;
185     return 0;
186 }
187 
188 /* Return the number of elements on a stack. */
189 uint
ref_stack_count(const ref_stack_t * pstack)190 ref_stack_count(const ref_stack_t *pstack)
191 {
192     return ref_stack_count_inline(pstack);
193 }
194 
195 /*
196  * Return a pointer to a given element from the stack, counting from
197  * 0 as the top element.  If the index is out of range, return 0.
198  */
199 ref *
ref_stack_index(const ref_stack_t * pstack,long idx)200 ref_stack_index(const ref_stack_t *pstack, long idx)
201 {
202     ref_stack_block *pblock;
203     uint used = pstack->p + 1 - pstack->bot;
204 
205     if (idx < 0)
206 	return NULL;
207     if (idx < used)		/* common case */
208 	return pstack->p - (uint) idx;
209     pblock = (ref_stack_block *) pstack->current.value.refs;
210     do {
211 	pblock = (ref_stack_block *) pblock->next.value.refs;
212 	if (pblock == 0)
213 	    return NULL;
214 	idx -= used;
215 	used = r_size(&pblock->used);
216     } while (idx >= used);
217     return pblock->used.value.refs + (used - 1 - (uint) idx);
218 }
219 
220 /*
221  * Count the number of elements down to and including the first mark.
222  * If no mark is found, return 0.
223  */
224 uint
ref_stack_counttomark(const ref_stack_t * pstack)225 ref_stack_counttomark(const ref_stack_t *pstack)
226 {
227     uint scanned = 0;
228     ref_stack_enum_t rsenum;
229 
230     ref_stack_enum_begin(&rsenum, pstack);
231     do {
232 	uint count = rsenum.size;
233 	const ref *p = rsenum.ptr + count - 1;
234 
235 	for (; count; count--, p--)
236 	    if (r_has_type(p, t_mark))
237 		return scanned + (rsenum.size - count + 1);
238 	scanned += rsenum.size;
239     } while (ref_stack_enum_next(&rsenum));
240     return 0;
241 }
242 
243 /*
244  * Do the store check for storing 'count' elements of a stack, starting
245  * 'skip' elements below the top, into an array.  Return 0 or e_invalidaccess.
246  */
247 int
ref_stack_store_check(const ref_stack_t * pstack,ref * parray,uint count,uint skip)248 ref_stack_store_check(const ref_stack_t *pstack, ref *parray, uint count,
249 		      uint skip)
250 {
251     uint space = r_space(parray);
252 
253     if (space != avm_local) {
254 	uint left = count, pass = skip;
255 	ref_stack_enum_t rsenum;
256 
257 	ref_stack_enum_begin(&rsenum, pstack);
258 	do {
259 	    ref *ptr = rsenum.ptr;
260 	    uint size = rsenum.size;
261 
262 	    if (size <= pass)
263 		pass -= size;
264 	    else {
265 		int code;
266 
267 		if (pass != 0)
268 		    size -= pass, pass = 0;
269 		ptr += size;
270 		if (size > left)
271 		    size = left;
272 		left -= size;
273 		code = refs_check_space(ptr - size, size, space);
274 		if (code < 0)
275 		    return code;
276 		if (left == 0)
277 		    break;
278 	    }
279 	} while (ref_stack_enum_next(&rsenum));
280     }
281     return 0;
282 }
283 
284 /*
285  * Store the top 'count' elements of a stack, starting 'skip' elements below
286  * the top, into an array, with or without store/undo checking.  age=-1 for
287  * no check, 0 for old, 1 for new.  May return e_rangecheck or
288  * e_invalidaccess.
289  */
290 #undef idmemory			/****** NOTA BENE ******/
291 int
ref_stack_store(const ref_stack_t * pstack,ref * parray,uint count,uint skip,int age,bool check,gs_dual_memory_t * idmemory,client_name_t cname)292 ref_stack_store(const ref_stack_t *pstack, ref *parray, uint count,
293 		uint skip, int age, bool check, gs_dual_memory_t *idmemory,
294 		client_name_t cname)
295 {
296     uint left, pass;
297     ref *to;
298     ref_stack_enum_t rsenum;
299 
300     if (count > ref_stack_count(pstack) || count > r_size(parray))
301 	return_error(e_rangecheck);
302     if (check) {
303 	int code = ref_stack_store_check(pstack, parray, count, skip);
304 
305 	if (code < 0)
306 	    return code;
307     }
308     to = parray->value.refs + count;
309     left = count, pass = skip;
310     ref_stack_enum_begin(&rsenum, pstack);
311     do {
312 	ref *from = rsenum.ptr;
313 	uint size = rsenum.size;
314 
315 	if (size <= pass)
316 	    pass -= size;
317 	else {
318 	    if (pass != 0)
319 		size -= pass, pass = 0;
320 	    from += size;
321 	    if (size > left)
322 		size = left;
323 	    left -= size;
324 	    switch (age) {
325 	    case -1:		/* not an array */
326 		while (size--) {
327 		    from--, to--;
328 		    ref_assign(to, from);
329 		}
330 		break;
331 	    case 0:		/* old array */
332 		while (size--) {
333 		    from--, to--;
334 		    ref_assign_old(parray, to, from, cname);
335 		}
336 		break;
337 	    case 1:		/* new array */
338 		while (size--) {
339 		    from--, to--;
340 		    ref_assign_new(to, from);
341 		}
342 		break;
343 	    }
344 	    if (left == 0)
345 		break;
346 	}
347     } while (ref_stack_enum_next(&rsenum));
348     r_set_size(parray, count);
349     return 0;
350 }
351 
352 /*
353  * Pop the top N elements off a stack.
354  * The number must not exceed the number of elements in use.
355  */
356 void
ref_stack_pop(ref_stack_t * pstack,uint count)357 ref_stack_pop(ref_stack_t *pstack, uint count)
358 {
359     uint used;
360 
361     while ((used = pstack->p + 1 - pstack->bot) < count) {
362 	count -= used;
363 	pstack->p = pstack->bot - 1;
364 	ref_stack_pop_block(pstack);
365     }
366     pstack->p -= count;
367 }
368 
369 /* Pop the top block off a stack.  May return underflow_error. */
370 int
ref_stack_pop_block(ref_stack_t * pstack)371 ref_stack_pop_block(ref_stack_t *pstack)
372 {
373     s_ptr bot = pstack->bot;
374     uint count = pstack->p + 1 - bot;
375     ref_stack_block *pcur =
376     (ref_stack_block *) pstack->current.value.refs;
377     ref_stack_block *pnext =
378     (ref_stack_block *) pcur->next.value.refs;
379     uint used;
380     ref *body;
381     ref next;
382 
383     if (pnext == 0)
384 	return_error(pstack->params->underflow_error);
385     used = r_size(&pnext->used);
386     body = (ref *) (pnext + 1) + pstack->params->bot_guard;
387     next = pcur->next;
388     /*
389        * If the contents of the two blocks won't fit in a single block, we
390        * move up the used part of the top block, and copy up as much of
391        * the contents of the next block under it as will fit.  If the
392        * contents of both blocks fit in a single block, we copy the used
393        * part of the top block to the top of the next block, and free the
394        * top block.
395      */
396     if (used + count > pstack->body_size) {
397 	/*
398 	 * The contents of the two blocks won't fit into a single block.
399 	 * On the assumption that we're recovering from a local stack
400 	 * underflow and need to increase the number of contiguous
401 	 * elements available, move up the used part of the top block, and
402 	 * copy up as much of the contents of the next block under it as
403 	 * will fit.
404 	 */
405 	uint moved = pstack->body_size - count;
406 	uint left;
407 
408 	if (moved == 0)
409 	    return_error(e_Fatal);
410 	memmove(bot + moved, bot, count * sizeof(ref));
411 	left = used - moved;
412 	memcpy(bot, body + left, moved * sizeof(ref));
413 	refset_null_new(body + left, moved, 0);
414 	r_dec_size(&pnext->used, moved);
415 	pstack->p = pstack->top;
416 	pstack->extension_used -= moved;
417     } else {
418 	/*
419 	   * The contents of the two blocks will fit into a single block.
420 	   * Copy the used part of the top block to the top of the next
421 	   * block, and free the top block.
422 	 */
423 	memcpy(body + used, bot, count * sizeof(ref));
424 	pstack->bot = bot = body;
425 	pstack->top = bot + pstack->body_size - 1;
426 	gs_free_ref_array(pstack->memory, &pstack->current,
427 			  "ref_stack_pop_block");
428 	pstack->current = next;
429 	pstack->p = bot + (used + count - 1);
430 	pstack->extension_size -= pstack->body_size;
431 	pstack->extension_used -= used;
432     }
433     return 0;
434 }
435 
436 /*
437  * Extend a stack to recover from an overflow condition.
438  * May return overflow_error or e_VMerror.
439  */
440 int
ref_stack_extend(ref_stack_t * pstack,uint request)441 ref_stack_extend(ref_stack_t *pstack, uint request)
442 {
443     uint keep = (pstack->top - pstack->bot + 1) / 3;
444     uint count = pstack->p - pstack->bot + 1;
445     const ref_stack_params_t *params = pstack->params;
446 
447     if (request > params->data_size)
448 	return_error(params->overflow_error);
449     if (keep + request > pstack->body_size)
450 	keep = pstack->body_size - request;
451     if (keep > count)
452 	keep = count;		/* required by ref_stack_push_block */
453     return ref_stack_push_block(pstack, keep, request);
454 }
455 
456 /*
457  * Push N empty slots onto a stack.  These slots are not initialized:
458  * the caller must immediately fill them.  May return overflow_error
459  * (if max_stack would be exceeded, or the stack has no allocator)
460  * or e_VMerror.
461  */
462 int
ref_stack_push(ref_stack_t * pstack,uint count)463 ref_stack_push(ref_stack_t *pstack, uint count)
464 {
465     /* Don't bother to pre-check for overflow: we must be able to */
466     /* back out in the case of a VMerror anyway, and */
467     /* ref_stack_push_block will make the check itself. */
468     uint needed = count;
469     uint added;
470 
471     for (; (added = pstack->top - pstack->p) < needed; needed -= added) {
472 	int code;
473 
474 	pstack->p = pstack->top;
475 	code = ref_stack_push_block(pstack,
476 				    (pstack->top - pstack->bot + 1) / 3,
477 				    added);
478 	if (code < 0) {
479 	    /* Back out. */
480 	    ref_stack_pop(pstack, count - needed + added);
481 	    pstack->requested = count;
482 	    return code;
483 	}
484     }
485     pstack->p += needed;
486     return 0;
487 }
488 
489 /*
490  * Push a block onto the stack, specifying how many elements of the current
491  * top block should remain in the top block and also how many elements we
492  * are trying to add.  Requires keep <= count.  May return overflow_error or
493  * e_VMerror.
494  */
495 static int
ref_stack_push_block(ref_stack_t * pstack,uint keep,uint add)496 ref_stack_push_block(ref_stack_t *pstack, uint keep, uint add)
497 {
498     const ref_stack_params_t *params = pstack->params;
499     uint count = pstack->p - pstack->bot + 1;
500     uint move = count - keep;
501     ref_stack_block *pcur = (ref_stack_block *) pstack->current.value.refs;
502     ref next;
503     ref_stack_block *pnext;
504     ref *body;
505     int code;
506 
507     if (keep > count)
508 	return_error(e_Fatal);
509     /* Check for overflowing the maximum size, */
510     /* or expansion not allowed.  */
511     if (pstack->extension_used + (pstack->top - pstack->bot) + add >=
512 	pstack->max_stack.value.intval ||
513 	!params->allow_expansion
514 	)
515 	return_error(params->overflow_error);
516     code = gs_alloc_ref_array(pstack->memory, &next, 0,
517 			      params->block_size, "ref_stack_push_block");
518     if (code < 0)
519 	return code;
520     pnext = (ref_stack_block *) next.value.refs;
521     body = (ref *) (pnext + 1);
522     /* Copy the top keep elements into the new block, */
523     /* and make the new block the top block. */
524     init_block(pstack, &next, keep);
525     body += params->bot_guard;
526     memcpy(body, pstack->bot + move, keep * sizeof(ref));
527     /* Clear the elements above the top of the new block. */
528     refset_null_new(body + keep, params->data_size - keep, 0);
529     /* Clear the elements above the top of the old block. */
530     refset_null_new(pstack->bot + move, keep, 0);
531     pnext->next = pstack->current;
532     pcur->used.value.refs = pstack->bot;
533     r_set_size(&pcur->used, move);
534     pstack->current = next;
535     pstack->bot = body;
536     pstack->top = pstack->bot + pstack->body_size - 1;
537     pstack->p = pstack->bot + keep - 1;
538     pstack->extension_size += pstack->body_size;
539     pstack->extension_used += move;
540     return 0;
541 }
542 
543 /* Begin enumerating the blocks of a stack. */
544 void
ref_stack_enum_begin(ref_stack_enum_t * prse,const ref_stack_t * pstack)545 ref_stack_enum_begin(ref_stack_enum_t *prse, const ref_stack_t *pstack)
546 {
547     prse->block = (ref_stack_block *)pstack->current.value.refs;
548     prse->ptr = pstack->bot;
549     prse->size = pstack->p + 1 - pstack->bot;
550 }
551 
552 bool
ref_stack_enum_next(ref_stack_enum_t * prse)553 ref_stack_enum_next(ref_stack_enum_t *prse)
554 {
555     ref_stack_block *block =
556 	prse->block = (ref_stack_block *)prse->block->next.value.refs;
557 
558     if (block == 0)
559 	return false;
560     prse->ptr = block->used.value.refs;
561     prse->size = r_size(&block->used);
562     return true;
563 }
564 
565 /* Clean up a stack for garbage collection. */
566 void
ref_stack_cleanup(ref_stack_t * pstack)567 ref_stack_cleanup(ref_stack_t *pstack)
568 {
569     ref_stack_block *pblock =
570 	(ref_stack_block *) pstack->current.value.refs;
571 
572     refset_null_new(pstack->p + 1, pstack->top - pstack->p, 0);
573     pblock->used = pstack->current;	/* set attrs */
574     pblock->used.value.refs = pstack->bot;
575     r_set_size(&pblock->used, pstack->p + 1 - pstack->bot);
576 }
577 
578 /*
579  * Free the entire contents of a stack, including the bottom block.
580  * The client must still call ref_stack_free.  Note that after calling
581  * ref_stack_release, the stack is no longer usable.
582  */
583 void
ref_stack_release(ref_stack_t * pstack)584 ref_stack_release(ref_stack_t *pstack)
585 {
586     gs_ref_memory_t *mem = pstack->memory;
587 
588     ref_stack_clear(pstack);
589     /* Free the parameter structure. */
590     gs_free_object((gs_memory_t *)mem, pstack->params,
591 		   "ref_stack_release(stack.params)");
592     /* Free the original (bottom) block. */
593     gs_free_ref_array(mem, &pstack->current, "ref_stack_release");
594 }
595 
596 /*
597  * Release a stack and then free the ref_stack object.
598  */
599 void
ref_stack_free(ref_stack_t * pstack)600 ref_stack_free(ref_stack_t *pstack)
601 {
602     gs_memory_t *mem = (gs_memory_t *)pstack->memory;
603 
604     ref_stack_release(pstack);
605     gs_free_object(mem, pstack, "ref_stack_free");
606 }
607 
608 /* ------ Internal routines ------ */
609 
610 /* Initialize the guards and body of a stack block. */
611 static void
init_block(ref_stack_t * pstack,const ref * psb,uint used)612 init_block(ref_stack_t *pstack, const ref *psb, uint used)
613 {
614     ref_stack_params_t *params = pstack->params;
615     ref *brefs = psb->value.refs;
616     uint i;
617     ref *p;
618 
619     for (i = params->bot_guard, p = brefs + stack_block_refs;
620 	 i != 0; i--, p++
621 	)
622 	ref_assign(p, &params->guard_value);
623     /* The top guard elements will never be read, */
624     /* but we need to initialize them for the sake of the GC. */
625     /* We can use refset_null for this, because even though it uses */
626     /* make_null_new and stack elements must not be marked new, */
627     /* these slots will never actually be read or written. */
628     if (params->top_guard) {
629 	ref *top = brefs + r_size(psb);
630 	int top_guard = params->top_guard;
631 
632 	refset_null_new(top - top_guard, top_guard, 0);
633     } {
634 	ref_stack_block *const pblock = (ref_stack_block *) brefs;
635 
636 	pblock->used = *psb;
637 	pblock->used.value.refs = brefs + stack_block_refs + params->bot_guard;
638 	r_set_size(&pblock->used, 0);
639     }
640 }
641