1 /* Copyright (C) 2001-2006 Artifex Software, Inc.
2 All Rights Reserved.
3
4 This software is provided AS-IS with no warranty, either express or
5 implied.
6
7 This software is distributed under license and may not be copied, modified
8 or distributed except as expressly authorized under the terms of that
9 license. Refer to licensing information at http://www.artifex.com/
10 or contact Artifex Software, Inc., 7 Mt. Lassen Drive - Suite A-134,
11 San Rafael, CA 94903, U.S.A., +1(415)492-9861, for further information.
12 */
13
14 /*$Id: gxclist.c 10200 2009-10-21 03:50:46Z mvrhel $ */
15 /* Command list document- and page-level code. */
16 #include "memory_.h"
17 #include "string_.h"
18 #include "gx.h"
19 #include "gp.h"
20 #include "gpcheck.h"
21 #include "gserrors.h"
22 #include "gxdevice.h"
23 #include "gxdevmem.h" /* must precede gxcldev.h */
24 #include "gxcldev.h"
25 #include "gxclpath.h"
26 #include "gsparams.h"
27 #include "gxdcolor.h"
28
29 extern dev_proc_open_device(pattern_clist_open_device);
30
31 /* GC information */
32 extern_st(st_imager_state);
33 static
34 ENUM_PTRS_WITH(device_clist_enum_ptrs, gx_device_clist *cdev)
35 if (index < st_device_forward_max_ptrs) {
36 gs_ptr_type_t ret = ENUM_USING_PREFIX(st_device_forward, 0);
37
38 return (ret ? ret : ENUM_OBJ(0));
39 }
40 index -= st_device_forward_max_ptrs;
41 if (CLIST_IS_WRITER(cdev)) {
42 switch (index) {
43 case 0: return ENUM_OBJ((cdev->writer.image_enum_id != gs_no_id ?
44 cdev->writer.clip_path : 0));
45 case 1: return ENUM_OBJ((cdev->writer.image_enum_id != gs_no_id ?
46 cdev->writer.color_space.space : 0));
47 case 2: return ENUM_OBJ(cdev->writer.pinst);
48 case 3: return ENUM_OBJ(cdev->writer.cropping_stack);
49 default:
50 return ENUM_USING(st_imager_state, &cdev->writer.imager_state,
51 sizeof(gs_imager_state), index - 3);
52 }
53 }
54 else {
55 /* 041207
56 * clist is reader.
57 * We don't expect this code to be exercised at this time as the reader
58 * runs under gdev_prn_output_page which is an atomic function of the
59 * interpreter. We do this as this situation may change in the future.
60 */
61 if (index == 0)
62 return ENUM_OBJ(cdev->reader.band_complexity_array);
63 else if (index == 1)
64 return ENUM_OBJ(cdev->reader.offset_map);
65 else
66 return 0;
67 }
68 ENUM_PTRS_END
/*
 * GC pointer relocation for the clist device.
 * Must relocate exactly the pointer set enumerated by
 * device_clist_enum_ptrs above.
 */
static
RELOC_PTRS_WITH(device_clist_reloc_ptrs, gx_device_clist *cdev)
{
    RELOC_PREFIX(st_device_forward);
    if (CLIST_IS_WRITER(cdev)) {
        /* clip_path and color_space.space are only live while an image
         * is being accumulated. */
        if (cdev->writer.image_enum_id != gs_no_id) {
            RELOC_VAR(cdev->writer.clip_path);
            RELOC_VAR(cdev->writer.color_space.space);
        }
        RELOC_VAR(cdev->writer.pinst);
        RELOC_VAR(cdev->writer.cropping_stack);
        /* Relocate the pointers embedded in the inline imager state. */
        RELOC_USING(st_imager_state, &cdev->writer.imager_state,
                    sizeof(gs_imager_state));
    } else {
        /* 041207
         * clist is reader.
         * See note above in ENUM_PTRS_WITH section.
         */
        RELOC_VAR(cdev->reader.band_complexity_array);
        RELOC_VAR(cdev->reader.offset_map);
    }
} RELOC_PTRS_END
91 public_st_device_clist();
92 private_st_clist_writer_cropping_buffer();
93
94 /* Forward declarations of driver procedures */
95 dev_proc_open_device(clist_open);
96 static dev_proc_output_page(clist_output_page);
97 static dev_proc_close_device(clist_close);
98 static dev_proc_get_band(clist_get_band);
99 /* Driver procedures defined in other files are declared in gxcldev.h. */
100
101 /* Other forward declarations */
102 static int clist_put_current_params(gx_device_clist_writer *cldev);
103
/* The device procedures */
/*
 * Entries are in gx_device_procs member order.  clist_* procedures record
 * drawing commands into the band list; gx_forward_* delegate to the target
 * device; gx_default_* fall back to generic implementations.  The NULL
 * transparency entries are not handled through this table.
 */
const gx_device_procs gs_clist_device_procs = {
    clist_open,                         /* open_device */
    gx_forward_get_initial_matrix,      /* get_initial_matrix */
    gx_default_sync_output,             /* sync_output */
    clist_output_page,                  /* output_page (should never be called) */
    clist_close,                        /* close_device */
    gx_forward_map_rgb_color,           /* map_rgb_color */
    gx_forward_map_color_rgb,           /* map_color_rgb */
    clist_fill_rectangle,               /* fill_rectangle */
    gx_default_tile_rectangle,          /* tile_rectangle */
    clist_copy_mono,                    /* copy_mono */
    clist_copy_color,                   /* copy_color */
    gx_default_draw_line,               /* draw_line (obsolete) */
    gx_default_get_bits,                /* get_bits */
    gx_forward_get_params,              /* get_params */
    gx_forward_put_params,              /* put_params */
    gx_forward_map_cmyk_color,          /* map_cmyk_color */
    gx_forward_get_xfont_procs,         /* get_xfont_procs */
    gx_forward_get_xfont_device,        /* get_xfont_device */
    gx_forward_map_rgb_alpha_color,     /* map_rgb_alpha_color */
    gx_forward_get_page_device,         /* get_page_device */
    gx_forward_get_alpha_bits,          /* get_alpha_bits */
    clist_copy_alpha,                   /* copy_alpha */
    clist_get_band,                     /* get_band */
    gx_default_copy_rop,                /* copy_rop */
    clist_fill_path,                    /* fill_path */
    clist_stroke_path,                  /* stroke_path */
    clist_fill_mask,                    /* fill_mask */
    clist_fill_trapezoid,               /* fill_trapezoid */
    clist_fill_parallelogram,           /* fill_parallelogram */
    clist_fill_triangle,                /* fill_triangle */
    gx_default_draw_thin_line,          /* draw_thin_line */
    gx_default_begin_image,             /* begin_image */
    gx_default_image_data,              /* image_data */
    gx_default_end_image,               /* end_image */
    clist_strip_tile_rectangle,         /* strip_tile_rectangle */
    clist_strip_copy_rop,               /* strip_copy_rop */
    gx_forward_get_clipping_box,        /* get_clipping_box */
    clist_begin_typed_image,            /* begin_typed_image */
    clist_get_bits_rectangle,           /* get_bits_rectangle */
    gx_forward_map_color_rgb_alpha,     /* map_color_rgb_alpha */
    clist_create_compositor,            /* create_compositor */
    gx_forward_get_hardware_params,     /* get_hardware_params */
    gx_default_text_begin,              /* text_begin */
    gx_default_finish_copydevice,       /* finish_copydevice */
    NULL,			/* begin_transparency_group */
    NULL,			/* end_transparency_group */
    NULL,			/* begin_transparency_mask */
    NULL,			/* end_transparency_mask */
    NULL,			/* discard_transparency_layer */
    gx_forward_get_color_mapping_procs, /* get_color_mapping_procs */
    gx_forward_get_color_comp_index,    /* get_color_comp_index */
    gx_forward_encode_color,            /* encode_color */
    gx_forward_decode_color,            /* decode_color */
    clist_pattern_manage,               /* pattern_manage */
    gx_default_fill_rectangle_hl_color, /* fill_rectangle_hl_color */
    gx_default_include_color_space,     /* include_color_space */
    gx_default_fill_linear_color_scanline,  /* fill_linear_color_scanline */
    clist_fill_linear_color_trapezoid,  /* fill_linear_color_trapezoid */
    clist_fill_linear_color_triangle,   /* fill_linear_color_triangle */
    gx_forward_update_spot_equivalent_colors, /* update_spot_equivalent_colors */
    gx_forward_ret_devn_params,         /* ret_devn_params */
    clist_fillpage,                     /* fillpage */
    NULL,			/* push_transparency_state */
    NULL			/* pop_transparency_state */

};
172
/*------------------- Choose the implementation -----------------------

   For choosing the clist i/o implementation by makefile options
   we define global variables, which are initialized with
   file/memory io procs when they are included into the build.
 */
/* Filled in by the file-based clist i/o implementation when it is linked
 * into the build; stays NULL otherwise. */
const clist_io_procs_t *clist_io_procs_file_global = NULL;
/* Filled in by the memory-based clist i/o implementation when it is linked
 * into the build; stays NULL otherwise. */
const clist_io_procs_t *clist_io_procs_memory_global = NULL;
181
182 void
clist_init_io_procs(gx_device_clist * pclist_dev,bool in_memory)183 clist_init_io_procs(gx_device_clist *pclist_dev, bool in_memory)
184 {
185 if (in_memory || clist_io_procs_file_global == NULL)
186 pclist_dev->common.page_info.io_procs = clist_io_procs_memory_global;
187 else
188 pclist_dev->common.page_info.io_procs = clist_io_procs_file_global;
189 }
190
191 /* ------ Define the command set and syntax ------ */
192
/* Initialization for imager state. */
/* The initial scale (300/72, i.e. a nominal 300 dpi) is arbitrary; it is
 * overwritten before the state is actually used for rendering. */
const gs_imager_state clist_imager_state_initial =
{gs_imager_state_initial(300.0 / 72.0, false)};
197
198 /*
199 * The buffer area (data, data_size) holds a bitmap cache when both writing
200 * and reading. The rest of the space is used for the command buffer and
201 * band state bookkeeping when writing, and for the rendering buffer (image
202 * device) when reading. For the moment, we divide the space up
203 * arbitrarily, except that we allocate less space for the bitmap cache if
204 * the device doesn't need halftoning.
205 *
206 * All the routines for allocating tables in the buffer are idempotent, so
207 * they can be used to check whether a given-size buffer is large enough.
208 */
209
210 /*
211 * Calculate the desired size for the tile cache.
212 */
213 static uint
clist_tile_cache_size(const gx_device * target,uint data_size)214 clist_tile_cache_size(const gx_device * target, uint data_size)
215 {
216 uint bits_size =
217 (data_size / 5) & -align_cached_bits_mod; /* arbitrary */
218
219 if (!gx_device_must_halftone(target)) { /* No halftones -- cache holds only Patterns & characters. */
220 bits_size -= bits_size >> 2;
221 }
222 #define min_bits_size 1024
223 if (bits_size < min_bits_size)
224 bits_size = min_bits_size;
225 #undef min_bits_size
226 return bits_size;
227 }
228
229 /*
230 * Initialize the allocation for the tile cache. Sets: tile_hash_mask,
231 * tile_max_count, tile_table, chunk (structure), bits (structure).
232 */
/*
 * Initialize the allocation for the tile cache within (init_data, data_size).
 * Sets: tile_hash_mask, tile_max_count, tile_table, chunk (structure),
 * bits (structure).  Idempotent; returns rangecheck if the buffer is too
 * small to hold even a minimal hash table.
 */
static int
clist_init_tile_cache(gx_device * dev, byte * init_data, ulong data_size)
{
    gx_device_clist_writer * const cdev =
        &((gx_device_clist *)dev)->writer;
    byte *data = init_data;
    uint bits_size = data_size;
    /*
     * Partition the bits area between the hash table and the actual
     * bitmaps.  The per-bitmap overhead is about 24 bytes; if the
     * average character size is 10 points, its bitmap takes about 24 +
     * 0.5 * 10/72 * xdpi * 10/72 * ydpi / 8 bytes (the 0.5 being a
     * fudge factor to account for characters being narrower than they
     * are tall), which gives us a guideline for the size of the hash
     * table.
     */
    uint avg_char_size =
        (uint)(dev->HWResolution[0] * dev->HWResolution[1] *
               (0.5 * 10 / 72 * 10 / 72 / 8)) + 24;
    uint hc = bits_size / avg_char_size;	/* estimated entry count */
    uint hsize;

    while ((hc + 1) & hc)
        hc |= hc >> 1;		/* make mask (power of 2 - 1) */
    if (hc < 0xff)
        hc = 0xff;		/* make allowance for halftone tiles */
    else if (hc > 0xfff)
        hc = 0xfff;		/* cmd_op_set_tile_index has 12-bit operand */
    /* Make sure the tables will fit: halve the mask until the hash table
     * leaves room for bitmap storage. */
    while (hc >= 3 && (hsize = (hc + 1) * sizeof(tile_hash)) >= bits_size)
        hc >>= 1;
    if (hc < 3)
        return_error(gs_error_rangecheck);
    cdev->tile_hash_mask = hc;
    /* Allow the table to fill only 3/4 full to limit collision chains. */
    cdev->tile_max_count = hc - (hc >> 2);
    cdev->tile_table = (tile_hash *) data;
    /* The bitmap chunk occupies whatever follows the hash table. */
    data += hsize;
    bits_size -= hsize;
    gx_bits_cache_chunk_init(&cdev->chunk, data, bits_size);
    gx_bits_cache_init(&cdev->bits, &cdev->chunk);
    return 0;
}
275
276 /*
277 * Initialize the allocation for the bands. Requires: target. Sets:
278 * page_band_height (=page_info.band_params.BandHeight), nbands.
279 */
/*
 * Initialize the allocation for the bands.  Requires: target.  Sets:
 * page_band_height (=page_info.band_params.BandHeight), nbands.
 * Returns rangecheck if a single band's raster would not fit in data_size.
 */
static int
clist_init_bands(gx_device * dev, gx_device_memory *bdev, uint data_size,
                 int band_width, int band_height)
{
    gx_device_clist_writer * const cdev =
        &((gx_device_clist *)dev)->writer;
    int nbands;
    ulong space;

    if (dev->procs.open_device == pattern_clist_open_device) {
        /* We don't need bands really: a pattern clist covers its whole
         * height with a single band. */
        cdev->page_band_height = dev->height;
        cdev->nbands = 1;
        return 0;
    }
    /* Verify that one band's memory-device raster fits in the buffer. */
    if (gdev_mem_data_size(bdev, band_width, band_height, &space) < 0 ||
        space > data_size)
        return_error(gs_error_rangecheck);
    cdev->page_band_height = band_height;
    /* Round up so the last (possibly short) band is counted. */
    nbands = (cdev->target->height + band_height - 1) / band_height;
    cdev->nbands = nbands;
#ifdef DEBUG
    if (gs_debug_c('l') | gs_debug_c(':'))
        dlprintf4("[:]width=%d, band_width=%d, band_height=%d, nbands=%d\n",
                  bdev->width, band_width, band_height, nbands);
#endif
    return 0;
}
308
309 /*
310 * Initialize the allocation for the band states, which are used only
311 * when writing. Requires: nbands. Sets: states, cbuf, cend.
312 */
/*
 * Initialize the allocation for the band states, which are used only
 * when writing.  Requires: nbands.  Sets: states, cbuf, cend.
 * Returns rangecheck if the buffer cannot hold the state array plus a
 * minimal command buffer.
 */
static int
clist_init_states(gx_device * dev, byte * init_data, uint data_size)
{
    gx_device_clist_writer * const cdev =
        &((gx_device_clist *)dev)->writer;
    ulong state_size = cdev->nbands * (ulong) sizeof(gx_clist_state);
    /* Align to the natural boundary for ARM processors, bug 689600 */
    /* (sizeof(init_data) is the pointer size, i.e. the machine word,
     * so 'alignment' is the pad needed to word-align init_data). */
    long alignment = (-(long)init_data) & (sizeof(init_data) - 1);

    /*
     * The +100 in the next line is bogus, but we don't know what the
     * real check should be.  We're effectively assuring that at least 100
     * bytes will be available to buffer command operands.
     */
    if (state_size + sizeof(cmd_prefix) + cmd_largest_size + 100 + alignment > data_size)
        return_error(gs_error_rangecheck);
    /* The end buffer position is not affected by alignment */
    cdev->cend = init_data + data_size;
    init_data +=  alignment;
    cdev->states = (gx_clist_state *) init_data;
    /* The command buffer begins right after the band-state array. */
    cdev->cbuf = init_data + state_size;
    return 0;
}
336
337 /*
338 * Initialize all the data allocations. Requires: target. Sets:
339 * page_tile_cache_size, page_info.band_params.BandWidth,
340 * page_info.band_params.BandBufferSpace, + see above.
341 */
/*
 * Initialize all the data allocations.  Requires: target.  Sets:
 * page_tile_cache_size, page_info.band_params.BandWidth,
 * page_info.band_params.BandBufferSpace, + see above.
 * The buffer is carved into (1) tile cache, (2) band states + command
 * buffer; the rendering-time band raster must also fit in the band space.
 */
static int
clist_init_data(gx_device * dev, byte * init_data, uint data_size)
{
    gx_device_clist_writer * const cdev =
        &((gx_device_clist *)dev)->writer;
    gx_device *target = cdev->target;
    /* BandWidth can't be smaller than target device width */
    const int band_width =
        cdev->page_info.band_params.BandWidth = max(target->width, cdev->band_params.BandWidth);
    int band_height = cdev->band_params.BandHeight;
    bool page_uses_transparency = cdev->page_uses_transparency;
    const uint band_space =
    cdev->page_info.band_params.BandBufferSpace =
        (cdev->band_params.BandBufferSpace ?
         cdev->band_params.BandBufferSpace : data_size);
    byte *data = init_data;
    uint size = band_space;
    uint bits_size;
    gx_device_memory bdev;
    gx_device *pbdev = (gx_device *)&bdev;
    int code;

    /* the clist writer has its own color info that depends upon the
       transparency group color space (if transparency exists).  The data that is
       used in the clist writing.  Here it is initialized with
       the target device color info.  The values will be pushed and popped
       in a stack if we have changing color spaces in the transparency groups. */

    cdev->clist_color_info.depth = dev->color_info.depth;
    cdev->clist_color_info.polarity = dev->color_info.polarity;
    cdev->clist_color_info.num_components = dev->color_info.num_components;

    /* Call create_buf_device to get the memory planarity set up. */
    cdev->buf_procs.create_buf_device(&pbdev, target, 0, NULL, NULL, clist_get_band_complexity(0, 0));
    /* HACK - if the buffer device can't do copy_alpha, disallow */
    /* copy_alpha in the command list device as well. */
    if (dev_proc(pbdev, copy_alpha) == gx_no_copy_alpha)
        cdev->disable_mask |= clist_disable_copy_alpha;
    if (cdev->procs.open_device == pattern_clist_open_device) {
        /* Pattern clist: no banding, so just split the buffer in half. */
        bits_size = data_size / 2;
    } else if (band_height) {
        /*
         * The band height is fixed, so the band buffer requirement
         * is completely determined.
         */
        ulong band_data_size;

        if (gdev_mem_data_size(&bdev, band_width, band_height, &band_data_size) < 0 ||
            band_data_size >= band_space)
            return_error(gs_error_rangecheck);
        /* Tile cache gets what the band raster leaves over, capped at
         * half the total buffer. */
        bits_size = min(band_space - band_data_size, data_size >> 1);
    } else {
        /*
         * Choose the largest band height that will fit in the
         * rendering-time buffer.
         */
        bits_size = clist_tile_cache_size(target, band_space);
        bits_size = min(bits_size, data_size >> 1);
        band_height = gdev_mem_max_height(&bdev, band_width,
                          band_space - bits_size, page_uses_transparency);
        if (band_height == 0)
            return_error(gs_error_rangecheck);
    }
    cdev->ins_count = 0;
    /* Lay out the buffer: tile cache first, then states + command buffer. */
    code = clist_init_tile_cache(dev, data, bits_size);
    if (code < 0)
        return code;
    cdev->page_tile_cache_size = bits_size;
    data += bits_size;
    size -= bits_size;
    code = clist_init_bands(dev, &bdev, size, band_width, band_height);
    if (code < 0)
        return code;
    return clist_init_states(dev, data, data_size - bits_size);
}
417 /*
418 * Reset the device state (for writing). This routine requires only
419 * data, data_size, and target to be set, and is idempotent.
420 */
/*
 * Reset the device state (for writing).  This routine requires only
 * data, data_size, and target to be set, and is idempotent.
 * On allocation-layout failure, records and returns the error in
 * permanent_error, which blocks further writing.
 */
static int
clist_reset(gx_device * dev)
{
    gx_device_clist_writer * const cdev =
        &((gx_device_clist *)dev)->writer;
    int code = clist_init_data(dev, cdev->data, cdev->data_size);
    int nbands;

    if (code < 0)
        return (cdev->permanent_error = code);
    /* Now initialize the rest of the state. */
    cdev->permanent_error = 0;
    nbands = cdev->nbands;
    cdev->ymin = cdev->ymax = -1;	/* render_init not done yet */
    /* Empty the tile hash table. */
    memset(cdev->tile_table, 0, (cdev->tile_hash_mask + 1) *
           sizeof(*cdev->tile_table));
    cdev->cnext = cdev->cbuf;
    cdev->ccl = 0;
    cdev->band_range_list.head = cdev->band_range_list.tail = 0;
    cdev->band_range_min = 0;
    cdev->band_range_max = nbands - 1;
    {
        /* Reset every band's recorded state to the initial values. */
        int band;
        gx_clist_state *states = cdev->states;

        for (band = 0; band < nbands; band++, states++) {
            static const gx_clist_state cls_initial =
            {cls_initial_values};

            *states = cls_initial;
        }
    }
    /*
     * Round up the size of the per-tile band mask so that the bits,
     * which follow it, stay aligned.
     */
    cdev->tile_band_mask_size =
        ((nbands + (align_bitmap_mod * 8 - 1)) >> 3) &
        ~(align_bitmap_mod - 1);
    /*
     * Initialize the all-band parameters to impossible values,
     * to force them to be written the first time they are used.
     */
    memset(&cdev->tile_params, 0, sizeof(cdev->tile_params));
    cdev->tile_depth = 0;
    cdev->tile_known_min = nbands;
    cdev->tile_known_max = -1;
    cdev->imager_state = clist_imager_state_initial;
    cdev->clip_path = NULL;
    cdev->clip_path_id = gs_no_id;
    cdev->color_space.byte1 = 0;
    cdev->color_space.id = gs_no_id;
    cdev->color_space.space = 0;
    {
        /* Invalidate all cached transfer-function ids. */
        int i;

        for (i = 0; i < countof(cdev->transfer_ids); ++i)
            cdev->transfer_ids[i] = gs_no_id;
    }
    cdev->black_generation_id = gs_no_id;
    cdev->undercolor_removal_id = gs_no_id;
    cdev->device_halftone_id = gs_no_id;
    cdev->image_enum_id = gs_no_id;
    /* Reset the transparency-cropping bookkeeping to "whole page". */
    cdev->cropping_min = cdev->save_cropping_min = 0;
    cdev->cropping_max = cdev->save_cropping_max = cdev->height;
    cdev->cropping_saved = false;
    cdev->cropping_stack = NULL;
    cdev->cropping_level = 0;
    cdev->mask_id_count = cdev->mask_id = cdev->temp_mask_id = 0;
    return 0;
}
492 /*
493 * Initialize the device state (for writing). This routine requires only
494 * data, data_size, and target to be set, and is idempotent.
495 */
496 static int
clist_init(gx_device * dev)497 clist_init(gx_device * dev)
498 {
499 gx_device_clist_writer * const cdev =
500 &((gx_device_clist *)dev)->writer;
501 int code = clist_reset(dev);
502
503 if (code >= 0) {
504 cdev->image_enum_id = gs_no_id;
505 cdev->error_is_retryable = 0;
506 cdev->driver_call_nesting = 0;
507 cdev->ignore_lo_mem_warnings = 0;
508 }
509 return code;
510 }
511
/* (Re)init open band files for output (set block size, etc). */
/* Sets low-memory warning thresholds on the band files so that partial-page
 * rendering can always flush what has been written so far. */
static int	/* ret 0 ok, -ve error code */
clist_reinit_output_file(gx_device *dev)
{    gx_device_clist_writer * const cdev =
        &((gx_device_clist *)dev)->writer;
    int code = 0;

    /* bfile needs to guarantee cmd_blocks for: 1 band range, nbands */
    /*  & terminating entry */
    int b_block = sizeof(cmd_block) * (cdev->nbands + 2);

    /* cfile needs to guarantee one writer buffer */
    /*  + one end_clip cmd (if during image's clip path setup) */
    /*  + an end_image cmd for each band (if during image) */
    /*  + end_cmds for each band and one band range */
    int c_block =
        cdev->cend - cdev->cbuf + 2 + cdev->nbands * 2 + (cdev->nbands + 1);

    /* All this is for partial page rendering's benefit, do only */
    /* if partial page rendering is available */
    if ( clist_test_VMerror_recoverable(cdev) )
        {    if (cdev->page_bfile != 0)
            code = cdev->page_info.io_procs->set_memory_warning(cdev->page_bfile, b_block);
        if (code >= 0 && cdev->page_cfile != 0)
            code = cdev->page_info.io_procs->set_memory_warning(cdev->page_cfile, c_block);
        }
    return code;
}
540
/* Write out the current parameters that must be at the head of each page */
/* if async rendering is in effect */
static int
clist_emit_page_header(gx_device *dev)
{
    gx_device_clist_writer * const cdev =
        &((gx_device_clist *)dev)->writer;
    int code = 0;

    /* Only needed when parameter pass-through is disabled; the reader then
     * depends on the recorded parameters. */
    if ((cdev->disable_mask & clist_disable_pass_thru_params)) {
        /* Retry the write through the VMerror-recovery path until it either
         * succeeds or recovery itself fails. */
        do
            if ((code = clist_put_current_params(cdev)) >= 0)
                break;
        while ((code = clist_VMerror_recover(cdev, code)) >= 0);
        /* A final failure here is unrecoverable for this page. */
        cdev->permanent_error = (code < 0 ? code : 0);
        if (cdev->permanent_error < 0)
            cdev->error_is_retryable = 0;
    }
    return code;
}
561
562 /* Reset parameters for the beginning of a page. */
563 static void
clist_reset_page(gx_device_clist_writer * cwdev)564 clist_reset_page(gx_device_clist_writer *cwdev)
565 {
566 cwdev->page_bfile_end_pos = 0;
567 /* Indicate that the colors_used information hasn't been computed. */
568 cwdev->page_info.scan_lines_per_colors_used = 0;
569 memset(cwdev->page_info.band_colors_used, 0,
570 sizeof(cwdev->page_info.band_colors_used));
571 }
572
/* Open the device's bandfiles */
/* Creates fresh command (cfile) and block (bfile) scratch files and
 * (re)initializes the writer state; on any failure the files are closed
 * and the error is recorded as permanent. */
static int
clist_open_output_file(gx_device *dev)
{
    gx_device_clist_writer * const cdev =
        &((gx_device_clist *)dev)->writer;
    char fmode[4];
    int code;

    if (cdev->do_not_open_or_close_bandfiles)
        return 0; /* external bandfile open/close managed externally */
    cdev->page_cfile = 0;	/* in case of failure */
    cdev->page_bfile = 0;	/* ditto */
    code = clist_init(dev);
    if (code < 0)
        return code;
    /* Band files are opened read/write in binary mode. */
    strcpy(fmode, "w+");
    strcat(fmode, gp_fmode_binary_suffix);
    cdev->page_cfname[0] = 0;	/* create a new file */
    cdev->page_bfname[0] = 0;	/* ditto */
    clist_reset_page(cdev);
    if ((code = cdev->page_info.io_procs->fopen(cdev->page_cfname, fmode, &cdev->page_cfile,
                            cdev->bandlist_memory, cdev->bandlist_memory,
                            true)) < 0 ||
        (code = cdev->page_info.io_procs->fopen(cdev->page_bfname, fmode, &cdev->page_bfile,
                            cdev->bandlist_memory, cdev->bandlist_memory,
                            false)) < 0 ||
        (code = clist_reinit_output_file(dev)) < 0
        ) {
        clist_close_output_file(dev);
        cdev->permanent_error = code;
        cdev->error_is_retryable = 0;
    }
    return code;
}
608
609 /* Close, and free the contents of, the temporary files of a page. */
610 /* Note that this does not deallocate the buffer. */
611 int
clist_close_page_info(gx_band_page_info_t * ppi)612 clist_close_page_info(gx_band_page_info_t *ppi)
613 {
614 if (ppi->cfile != NULL) {
615 ppi->io_procs->fclose(ppi->cfile, ppi->cfname, true);
616 ppi->cfile = NULL;
617 }
618 if (ppi->bfile != NULL) {
619 ppi->io_procs->fclose(ppi->bfile, ppi->bfname, true);
620 ppi->bfile = NULL;
621 }
622 return 0;
623 }
624
625 /* Close the device by freeing the temporary files. */
626 /* Note that this does not deallocate the buffer. */
627 int
clist_close_output_file(gx_device * dev)628 clist_close_output_file(gx_device *dev)
629 {
630 gx_device_clist_writer * const cdev =
631 &((gx_device_clist *)dev)->writer;
632
633 return clist_close_page_info(&cdev->page_info);
634 }
635
636 /* Open the device by initializing the device state and opening the */
637 /* scratch files. */
638 int
clist_open(gx_device * dev)639 clist_open(gx_device *dev)
640 {
641 gx_device_clist_writer * const cdev =
642 &((gx_device_clist *)dev)->writer;
643 bool save_is_open = dev->is_open;
644 int code;
645
646 cdev->permanent_error = 0;
647 cdev->is_open = false;
648 code = clist_init(dev);
649 if (code < 0)
650 return code;
651 code = clist_open_output_file(dev);
652 if ( code >= 0)
653 code = clist_emit_page_header(dev);
654 if (code >= 0)
655 dev->is_open = save_is_open;
656 return code;
657 }
658
659 static int
clist_close(gx_device * dev)660 clist_close(gx_device *dev)
661 {
662 gx_device_clist_writer * const cdev =
663 &((gx_device_clist *)dev)->writer;
664
665 if (cdev->do_not_open_or_close_bandfiles)
666 return 0;
667 if (cdev->procs.open_device == pattern_clist_open_device) {
668 gs_free_object(cdev->bandlist_memory, cdev->data, "clist_close");
669 cdev->data = NULL;
670 }
671 return clist_close_output_file(dev);
672 }
673
/* The output_page procedure should never be called! */
/* The clist device only records commands; actual page output is driven by
 * the target device, so reaching this entry is a fatal internal error. */
static int
clist_output_page(gx_device * dev, int num_copies, int flush)
{
    return_error(gs_error_Fatal);
}
680
/* Reset (or prepare to append to) the command list after printing a page. */
/* flush=true discards the recorded page (rewind+truncate the band files);
 * flush=false positions the files at EOF so further commands append. */
int
clist_finish_page(gx_device *dev, bool flush)
{
    gx_device_clist_writer * const cdev = &((gx_device_clist *)dev)->writer;
    int code;

    /* If this is a reader clist, which is about to be reset to a writer,
     * free any band_complexity_array memory used by same.
     * since we have been rendering, shut down threads
     */
    if (!CLIST_IS_WRITER((gx_device_clist *)dev)) {
        gx_clist_reader_free_band_complexity_array( (gx_device_clist *)dev );
        clist_teardown_render_threads(dev);
    }

    if (flush) {
        if (cdev->page_cfile != 0)
            cdev->page_info.io_procs->rewind(cdev->page_cfile, true, cdev->page_cfname);
        if (cdev->page_bfile != 0)
            cdev->page_info.io_procs->rewind(cdev->page_bfile, true, cdev->page_bfname);
        clist_reset_page(cdev);
    } else {
        if (cdev->page_cfile != 0)
            cdev->page_info.io_procs->fseek(cdev->page_cfile, 0L, SEEK_END, cdev->page_cfname);
        if (cdev->page_bfile != 0)
            cdev->page_info.io_procs->fseek(cdev->page_bfile, 0L, SEEK_END, cdev->page_bfname);
    }
    code = clist_init(dev);		/* reinitialize */
    if (code >= 0)
        code = clist_reinit_output_file(dev);
    if (code >= 0)
        code = clist_emit_page_header(dev);

    return code;
}
717
718 /* ------ Writing ------ */
719
720 /* End a page by flushing the buffer and terminating the command list. */
721 int /* ret 0 all-ok, -ve error code, or +1 ok w/low-mem warning */
clist_end_page(gx_device_clist_writer * cldev)722 clist_end_page(gx_device_clist_writer * cldev)
723 {
724 int code = cmd_write_buffer(cldev, cmd_opv_end_page);
725 cmd_block cb;
726 int ecode = 0;
727
728 if (code >= 0) {
729 /*
730 * Write the terminating entry in the block file.
731 * Note that because of copypage, there may be many such entries.
732 */
733 cb.band_min = cb.band_max = cmd_band_end;
734 cb.pos = (cldev->page_cfile == 0 ? 0 : cldev->page_info.io_procs->ftell(cldev->page_cfile));
735 code = cldev->page_info.io_procs->fwrite_chars(&cb, sizeof(cb), cldev->page_bfile);
736 if (code > 0)
737 code = 0;
738 }
739 if (code >= 0) {
740 clist_compute_colors_used(cldev);
741 ecode |= code;
742 cldev->page_bfile_end_pos = cldev->page_info.io_procs->ftell(cldev->page_bfile);
743 }
744 if (code < 0)
745 ecode = code;
746
747 /* Reset warning margin to 0 to release reserve memory if mem files */
748 if (cldev->page_bfile != 0)
749 cldev->page_info.io_procs->set_memory_warning(cldev->page_bfile, 0);
750 if (cldev->page_cfile != 0)
751 cldev->page_info.io_procs->set_memory_warning(cldev->page_cfile, 0);
752
753 #ifdef DEBUG
754 if (gs_debug_c('l') | gs_debug_c(':'))
755 dlprintf2("[:]clist_end_page at cfile=%ld, bfile=%ld\n",
756 (long)cb.pos, (long)cldev->page_bfile_end_pos);
757 #endif
758 return 0;
759 }
760
761 /* Compute the set of used colors in the page_info structure.
762 *
763 * NB: Area for improvement, move states[band] and page_info to clist
764 * rather than writer device, or remove completely as this is used by the old planar devices
765 * to operate on a plane at a time.
766 */
767
768 void
clist_compute_colors_used(gx_device_clist_writer * cldev)769 clist_compute_colors_used(gx_device_clist_writer *cldev)
770 {
771 int nbands = cldev->nbands;
772 int bands_per_colors_used =
773 (nbands + PAGE_INFO_NUM_COLORS_USED - 1) /
774 PAGE_INFO_NUM_COLORS_USED;
775 int band;
776
777 cldev->page_info.scan_lines_per_colors_used =
778 cldev->page_band_height * bands_per_colors_used;
779 memset(cldev->page_info.band_colors_used, 0,
780 sizeof(cldev->page_info.band_colors_used));
781 for (band = 0; band < nbands; ++band) {
782 int entry = band / bands_per_colors_used;
783
784 cldev->page_info.band_colors_used[entry].or |=
785 cldev->states[band].colors_used.or;
786 cldev->page_info.band_colors_used[entry].slow_rop |=
787 cldev->states[band].colors_used.slow_rop;
788
789 }
790 }
791
/* Recover recoverable VM error if possible without flushing */
/* Renders queued pages one at a time until either enough band-list memory
 * is free to continue writing, or no pages remain. */
int	/* ret -ve err, >= 0 if recovered w/# = cnt pages left in page queue */
clist_VMerror_recover(gx_device_clist_writer *cldev,
                      int old_error_code)
{
    int code = old_error_code;
    int pages_remain;

    /* Only a retryable VMerror on a device that can free band-list
     * memory is recoverable here. */
    if (!clist_test_VMerror_recoverable(cldev) ||
        !cldev->error_is_retryable ||
        old_error_code != gs_error_VMerror
        )
        return old_error_code;

    /* Do some rendering, return if enough memory is now free */
    do {
        pages_remain =
            (*cldev->free_up_bandlist_memory)( (gx_device *)cldev, false );
        if (pages_remain < 0) {
            code = pages_remain;	/* abort, error or interrupt req */
            break;
        }
        /* Success of reinit means the low-memory guarantees can be met. */
        if (clist_reinit_output_file( (gx_device *)cldev ) == 0) {
            code = pages_remain;	/* got enough memory to continue */
            break;
        }
    } while (pages_remain);

    if_debug1('L', "[L]soft flush of command list, status: %d\n", code);
    return code;
}
823
/* If recoverable VM error, flush & try to recover it */
int	/* ret 0 ok, else -ve error */
clist_VMerror_recover_flush(gx_device_clist_writer *cldev,
                            int old_error_code)
{
    int free_code = 0;
    int reset_code = 0;
    int code;

    /* If the device has the ability to render partial pages, flush
     * out the bandlist, and reset the writing state. Then, get the
     * device to render this band. When done, see if there's now enough
     * memory to satisfy the minimum low-memory guarantees. If not,
     * get the device to render some more. If there's nothing left to
     * render & still insufficient memory, declare an error condition.
     */
    if (!clist_test_VMerror_recoverable(cldev) ||
        old_error_code != gs_error_VMerror
        )
        return old_error_code;	/* sorry, don't have any means to recover this error */
    free_code = (*cldev->free_up_bandlist_memory)( (gx_device *)cldev, true );

    /* Reset the state of bands to "don't know anything" */
    reset_code = clist_reset( (gx_device *)cldev );
    if (reset_code >= 0)
        reset_code = clist_open_output_file( (gx_device *)cldev );
    /* Re-emit the page-head parameters if pass-through is disabled. */
    if ( reset_code >= 0 &&
         (cldev->disable_mask & clist_disable_pass_thru_params)
         )
        reset_code = clist_put_current_params(cldev);
    if (reset_code < 0) {
        /* Failure to reset is unrecoverable for this page. */
        cldev->permanent_error = reset_code;
        cldev->error_is_retryable = 0;
    }

    /* Reset failure dominates; otherwise report the original error only
     * if freeing band-list memory itself failed. */
    code = (reset_code < 0 ? reset_code : free_code < 0 ? old_error_code : 0);
    if_debug1('L', "[L]hard flush of command list, status: %d\n", code);
    return code;
}
863
/* Write the target device's current parameter list */
/* Reads the target's parameters into a temporary C-param list and records
 * them in the command stream via cmd_put_params. */
static int	/* ret 0 all ok, -ve error */
clist_put_current_params(gx_device_clist_writer *cldev)
{
    gx_device *target = cldev->target;
    gs_c_param_list param_list;
    int code;

    /*
     * If a put_params call fails, the device will be left in a closed
     * state, but higher-level code won't notice this fact.  We flag this by
     * setting permanent_error, which prevents writing to the command list.
     */

    if (cldev->permanent_error)
        return cldev->permanent_error;
    gs_c_param_list_write(&param_list, cldev->memory);
    code = (*dev_proc(target, get_params))
        (target, (gs_param_list *)&param_list);
    if (code >= 0) {
        gs_c_param_list_read(&param_list);
        code = cmd_put_params( cldev, (gs_param_list *)&param_list );
    }
    /* Release the list whether or not the writes succeeded. */
    gs_c_param_list_release(&param_list);

    return code;
}
891
892 /* ---------------- Driver interface ---------------- */
893
894 static int
clist_get_band(gx_device * dev,int y,int * band_start)895 clist_get_band(gx_device * dev, int y, int *band_start)
896 {
897 gx_device_clist_writer * const cdev =
898 &((gx_device_clist *)dev)->writer;
899 int band_height = cdev->page_band_height;
900 int start;
901
902 if (y < 0)
903 y = 0;
904 else if (y >= dev->height)
905 y = dev->height;
906 *band_start = start = y - y % band_height;
907 return min(dev->height - start, band_height);
908 }
909
910 /* copy constructor if from != NULL
911 * default constructor if from == NULL
912 */
913 void
clist_copy_band_complexity(gx_band_complexity_t * this,const gx_band_complexity_t * from)914 clist_copy_band_complexity(gx_band_complexity_t *this, const gx_band_complexity_t *from)
915 {
916 if (from) {
917 memcpy(this, from, sizeof(gx_band_complexity_t));
918 } else {
919 /* default */
920 this->uses_color = false;
921 this->nontrivial_rops = false;
922 #if 0
923 /* todo: halftone phase */
924
925 this->x0 = 0;
926 this->y0 = 0;
927 #endif
928 }
929 }
930
931 int
clist_writer_push_no_cropping(gx_device_clist_writer * cdev)932 clist_writer_push_no_cropping(gx_device_clist_writer *cdev)
933 {
934 clist_writer_cropping_buffer_t *buf = gs_alloc_struct(cdev->memory,
935 clist_writer_cropping_buffer_t,
936 &st_clist_writer_cropping_buffer, "clist_writer_transparency_push");
937
938 if (buf == NULL)
939 return_error(gs_error_VMerror);
940 if_debug1('v', "[v]push cropping[%d]\n", cdev->cropping_level);
941 buf->next = cdev->cropping_stack;
942 cdev->cropping_stack = buf;
943 buf->cropping_min = cdev->cropping_min;
944 buf->cropping_max = cdev->cropping_max;
945 buf->mask_id = cdev->mask_id;
946 buf->temp_mask_id = cdev->temp_mask_id;
947 cdev->cropping_level++;
948 return 0;
949 }
950
951 int
clist_writer_push_cropping(gx_device_clist_writer * cdev,int ry,int rheight)952 clist_writer_push_cropping(gx_device_clist_writer *cdev, int ry, int rheight)
953 {
954 int code = clist_writer_push_no_cropping(cdev);
955
956 if (code < 0)
957 return 0;
958 cdev->cropping_min = max(cdev->cropping_min, ry);
959 cdev->cropping_max = min(cdev->cropping_max, ry + rheight);
960 return 0;
961 }
962
963 int
clist_writer_pop_cropping(gx_device_clist_writer * cdev)964 clist_writer_pop_cropping(gx_device_clist_writer *cdev)
965 {
966 clist_writer_cropping_buffer_t *buf = cdev->cropping_stack;
967
968 if (buf == NULL)
969 return_error(gs_error_unregistered); /*Must not happen. */
970 cdev->cropping_min = buf->cropping_min;
971 cdev->cropping_max = buf->cropping_max;
972 cdev->mask_id = buf->mask_id;
973 cdev->temp_mask_id = buf->temp_mask_id;
974 cdev->cropping_stack = buf->next;
975 cdev->cropping_level--;
976 if_debug1('v', "[v]pop cropping[%d]\n", cdev->cropping_level);
977 gs_free_object(cdev->memory, buf, "clist_writer_transparency_pop");
978 return 0;
979 }
980
981 int
clist_writer_check_empty_cropping_stack(gx_device_clist_writer * cdev)982 clist_writer_check_empty_cropping_stack(gx_device_clist_writer *cdev)
983 {
984 if (cdev->cropping_stack != NULL) {
985 if_debug1('v', "[v]Error: left %d cropping(s)\n", cdev->cropping_level);
986 return_error(gs_error_unregistered); /* Must not happen */
987 }
988 return 0;
989 }
990
991 /* Retrieve total size for cfile and bfile. */
clist_data_size(const gx_device_clist * cdev,int select)992 int clist_data_size(const gx_device_clist *cdev, int select)
993 {
994 const gx_band_page_info_t *pinfo = &cdev->common.page_info;
995 clist_file_ptr pfile = (!select ? pinfo->bfile : pinfo->cfile);
996 const char *fname = (!select ? pinfo->bfname : pinfo->cfname);
997 int code, size;
998
999 code = pinfo->io_procs->fseek(pfile, 0, SEEK_END, fname);
1000 if (code < 0)
1001 return_error(gs_error_unregistered); /* Must not happen. */
1002 code = pinfo->io_procs->ftell(pfile);
1003 if (code < 0)
1004 return_error(gs_error_unregistered); /* Must not happen. */
1005 size = code;
1006 return size;
1007 }
1008
1009 /* Get command list data. */
1010 int
clist_get_data(const gx_device_clist * cdev,int select,int offset,byte * buf,int length)1011 clist_get_data(const gx_device_clist *cdev, int select, int offset, byte *buf, int length)
1012 {
1013 const gx_band_page_info_t *pinfo = &cdev->common.page_info;
1014 clist_file_ptr pfile = (!select ? pinfo->bfile : pinfo->cfile);
1015 const char *fname = (!select ? pinfo->bfname : pinfo->cfname);
1016 int code;
1017
1018 code = pinfo->io_procs->fseek(pfile, offset, SEEK_SET, fname);
1019 if (code < 0)
1020 return_error(gs_error_unregistered); /* Must not happen. */
1021 /* This assumes that fread_chars doesn't return prematurely
1022 when the buffer is not fully filled and the end of stream is not reached. */
1023 return pinfo->io_procs->fread_chars(buf, length, pfile);
1024 }
1025
1026 /* Put command list data. */
1027 int
clist_put_data(const gx_device_clist * cdev,int select,int offset,const byte * buf,int length)1028 clist_put_data(const gx_device_clist *cdev, int select, int offset, const byte *buf, int length)
1029 {
1030 const gx_band_page_info_t *pinfo = &cdev->common.page_info;
1031 clist_file_ptr pfile = (!select ? pinfo->bfile : pinfo->cfile);
1032 int code;
1033
1034 code = pinfo->io_procs->ftell(pfile);
1035 if (code < 0)
1036 return_error(gs_error_unregistered); /* Must not happen. */
1037 if (code != offset) {
1038 /* Assuming a consecutive writing only. */
1039 return_error(gs_error_unregistered); /* Must not happen. */
1040 }
1041 /* This assumes that fwrite_chars doesn't return prematurely
1042 when the buffer is not fully written, except with an error. */
1043 return pinfo->io_procs->fwrite_chars(buf, length, pfile);
1044 }
1045
1046