1 /* Copyright (C) 2001-2019 Artifex Software, Inc.
2    All Rights Reserved.
3 
4    This software is provided AS-IS with no warranty, either express or
5    implied.
6 
7    This software is distributed under license and may not be copied,
8    modified or distributed except as expressly authorized under the terms
9    of the license contained in the file LICENSE in this distribution.
10 
11    Refer to licensing information at http://www.artifex.com or contact
12    Artifex Software, Inc.,  1305 Grant Avenue - Suite 200, Novato,
13    CA 94945, U.S.A., +1(415)492-9861, for further information.
14 */
15 
16 
17 /* Command list document- and page-level code. */
18 #include "memory_.h"
19 #include "string_.h"
20 #include "gx.h"
21 #include "gp.h"
22 #include "gpcheck.h"
23 #include "gserrors.h"
24 #include "gxdevice.h"
25 #include "gxdevmem.h"           /* must precede gxcldev.h */
26 #include "gxcldev.h"
27 #include "gxclpath.h"
28 #include "gsparams.h"
29 #include "gxdcolor.h"
30 #include "gscms.h"
31 #include "gsicc_manage.h"
32 #include "gsicc_cache.h"
33 #include "gxdevsop.h"
34 
35 #include "valgrind.h"
36 
37 extern dev_proc_open_device(pattern_clist_open_device);
38 
39 /* GC information */
/*  Where is the GC information for the common objects that are
    shared between the reader and the writer?  There are pointers in
    there, but they do not appear to be garbage collected.  This is why
    the icc_table and the link cache are kept in the reader and the
    writer rather than in the common part.   fixme: Also, if icc_cache_cl is
    not included in the writer, 64-bit builds will seg fault. */
46 
47 extern_st(st_gs_gstate);
48 static
49 ENUM_PTRS_WITH(device_clist_enum_ptrs, gx_device_clist *cdev)
50     if (index < st_device_forward_max_ptrs) {
51         gs_ptr_type_t ret = ENUM_USING_PREFIX(st_device_forward, st_device_max_ptrs);
52 
53         return (ret ? ret : ENUM_OBJ(0));
54     }
55     index -= st_device_forward_max_ptrs;
56     /* RJW: We do not enumerate icc_cache_cl or icc_cache_list as they
57      * are allocated in non gc space */
58     if (CLIST_IS_WRITER(cdev)) {
59         switch (index) {
60         case 0: return ENUM_OBJ((cdev->writer.image_enum_id != gs_no_id ?
61                      cdev->writer.clip_path : 0));
62         case 1: return ENUM_OBJ((cdev->writer.image_enum_id != gs_no_id ?
63                      cdev->writer.color_space.space : 0));
64         case 2: return ENUM_OBJ(cdev->writer.pinst);
65         case 3: return ENUM_OBJ(cdev->writer.cropping_stack);
66         case 4: return ENUM_OBJ(cdev->writer.icc_table);
67         default:
68         return ENUM_USING(st_gs_gstate, &cdev->writer.gs_gstate,
69                   sizeof(gs_gstate), index - 5);
70         }
71     }
72     else {
        /* 041207
         * clist is reader.
         * We don't expect this code to be exercised at this time, since the
         * reader runs under gdev_prn_output_page, which is an atomic function
         * of the interpreter.  We provide it anyway because this situation
         * may change in the future.
         */
79 
80         if (index == 0)
81             return ENUM_OBJ(cdev->reader.offset_map);
82         else if (index == 1)
83             return ENUM_OBJ(cdev->reader.icc_table);
84         else if (index == 2)
85             return ENUM_OBJ(cdev->reader.color_usage_array);
86         else
87             return 0;
88     }
89 ENUM_PTRS_END
90 static
RELOC_PTRS_WITH(device_clist_reloc_ptrs, gx_device_clist *cdev)
92 {
93     RELOC_PREFIX(st_device_forward);
94     if (CLIST_IS_WRITER(cdev)) {
95         if (cdev->writer.image_enum_id != gs_no_id) {
96             RELOC_VAR(cdev->writer.clip_path);
97             RELOC_VAR(cdev->writer.color_space.space);
98         }
99         RELOC_VAR(cdev->writer.pinst);
100         RELOC_VAR(cdev->writer.cropping_stack);
101         RELOC_VAR(cdev->writer.icc_table);
102         RELOC_USING(st_gs_gstate, &cdev->writer.gs_gstate,
103             sizeof(gs_gstate));
104     } else {
105         /* 041207
106          * clist is reader.
107          * See note above in ENUM_PTRS_WITH section.
108          */
109         RELOC_VAR(cdev->reader.offset_map);
110         RELOC_VAR(cdev->reader.icc_table);
111         RELOC_VAR(cdev->reader.color_usage_array);
112     }
113 } RELOC_PTRS_END
114 public_st_device_clist();
115 private_st_clist_writer_cropping_buffer();
116 private_st_clist_icctable_entry();
117 private_st_clist_icctable();
118 
119 /* Forward declarations of driver procedures */
120 dev_proc_open_device(clist_open);
121 dev_proc_output_page(clist_output_page);
122 static dev_proc_close_device(clist_close);
123 static dev_proc_get_band(clist_get_band);
124 /* Driver procedures defined in other files are declared in gxcldev.h. */
125 
126 /* Other forward declarations */
127 static int clist_put_current_params(gx_device_clist_writer *cldev);
128 
129 /* The device procedures */
130 const gx_device_procs gs_clist_device_procs = {
131     clist_open,
132     gx_forward_get_initial_matrix,
133     gx_default_sync_output,
134     clist_output_page,
135     clist_close,
136     gx_forward_map_rgb_color,
137     gx_forward_map_color_rgb,
138     clist_fill_rectangle,
139     gx_default_tile_rectangle,
140     clist_copy_mono,
141     clist_copy_color,
142     gx_default_draw_line,
143     gx_default_get_bits,
144     gx_forward_get_params,
145     gx_forward_put_params,
146     gx_forward_map_cmyk_color,
147     gx_forward_get_xfont_procs,
148     gx_forward_get_xfont_device,
149     gx_forward_map_rgb_alpha_color,
150     gx_forward_get_page_device,
151     gx_forward_get_alpha_bits,
152     clist_copy_alpha,
153     clist_get_band,
154     gx_default_copy_rop,
155     clist_fill_path,
156     clist_stroke_path,
157     clist_fill_mask,
158     clist_fill_trapezoid,
159     clist_fill_parallelogram,
160     clist_fill_triangle,
161     gx_default_draw_thin_line,
162     gx_default_begin_image,
163     gx_default_image_data,
164     gx_default_end_image,
165     clist_strip_tile_rectangle,
166     clist_strip_copy_rop,
167     gx_forward_get_clipping_box,
168     clist_begin_typed_image,
169     clist_get_bits_rectangle,
170     gx_forward_map_color_rgb_alpha,
171     clist_create_compositor,
172     gx_forward_get_hardware_params,
173     gx_default_text_begin,
174     gx_default_finish_copydevice,
175     gx_default_begin_transparency_group,                       /* begin_transparency_group */
176     gx_default_end_transparency_group,                       /* end_transparency_group */
177     gx_default_begin_transparency_mask,                       /* begin_transparency_mask */
178     gx_default_end_transparency_mask,                       /* end_transparency_mask */
179     gx_default_discard_transparency_layer,                       /* discard_transparency_layer */
180     gx_forward_get_color_mapping_procs,
181     gx_forward_get_color_comp_index,
182     gx_forward_encode_color,
183     gx_forward_decode_color,
184     gx_default_pattern_manage,                       /* pattern_manage */
185     clist_fill_rectangle_hl_color,
186     gx_default_include_color_space,
187     gx_default_fill_linear_color_scanline,
188     clist_fill_linear_color_trapezoid,
189     clist_fill_linear_color_triangle,
190     gx_forward_update_spot_equivalent_colors,
191     gx_forward_ret_devn_params,
192     clist_fillpage,
193     gx_default_push_transparency_state,                      /* push_transparency_state */
194     gx_default_pop_transparency_state,                      /* pop_transparency_state */
195     gx_default_put_image,                      /* put_image */
196     clist_dev_spec_op,
197     clist_copy_planes,         /* copy planes */
198     gx_default_get_profile,
199     gx_default_set_graphics_type_tag,
200     clist_strip_copy_rop2,
201     clist_strip_tile_rect_devn,
202     clist_copy_alpha_hl_color,
203     clist_process_page,
204     gx_default_transform_pixel_region,
205     clist_fill_stroke_path,
206 };
207 
208 /*------------------- Choose the implementation -----------------------
209 
   To choose the clist i/o implementation via makefile options,
   we define global variables which are initialized with the
   file/memory io procs when those are included in the build.
213  */
214 const clist_io_procs_t *clist_io_procs_file_global = NULL;
215 const clist_io_procs_t *clist_io_procs_memory_global = NULL;
216 
217 void
clist_init_io_procs(gx_device_clist *pclist_dev, bool in_memory)
219 {
220 #ifdef PACIFY_VALGRIND
221     VALGRIND_HG_DISABLE_CHECKING(&clist_io_procs_file_global, sizeof(clist_io_procs_file_global));
222     VALGRIND_HG_DISABLE_CHECKING(&clist_io_procs_memory_global, sizeof(clist_io_procs_memory_global));
223 #endif
224     /* if clist_io_procs_file_global is NULL, then BAND_LIST_STORAGE=memory */
225     /* was specified in the build, and "file" is not available */
226     if (in_memory || clist_io_procs_file_global == NULL)
227         pclist_dev->common.page_info.io_procs = clist_io_procs_memory_global;
228     else
229         pclist_dev->common.page_info.io_procs = clist_io_procs_file_global;
230 }
231 
232 /* ------ Define the command set and syntax ------ */
233 
234 /*
235  * The buffer area (data, data_size) holds a bitmap cache when both writing
236  * and reading.  The rest of the space is used for the command buffer and
237  * band state bookkeeping when writing, and for the rendering buffer (image
238  * device) when reading.  For the moment, we divide the space up
239  * arbitrarily, except that we allocate less space for the bitmap cache if
240  * the device doesn't need halftoning.
241  *
242  * All the routines for allocating tables in the buffer are idempotent, so
243  * they can be used to check whether a given-size buffer is large enough.
244  */
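/*
 * A rough sketch of the layout set up by clist_init_data() and
 * clist_init_states() below (writer side; sizes are approximate):
 *
 *   init_data                                                init_data + data_size
 *   | tile/bitmap cache    | band states (one gx_clist_state | command buffer    |
 *   | (bits_size bytes)    | per band, pointer-aligned)      | (cbuf .. cend)    |
 *
 * When reading, the space after the bitmap cache is reused for the band
 * rendering (image device) buffer instead.
 */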
245 
246 /*
247  * Calculate the desired size for the tile cache.
248  */
249 static uint
clist_tile_cache_size(const gx_device * target, uint data_size)
251 {
252     uint bits_size =
253     (data_size / 5) & -align_cached_bits_mod;   /* arbitrary */
254 
255     if (!gx_device_must_halftone(target)) {     /* No halftones -- cache holds only Patterns & characters. */
256         bits_size -= bits_size >> 2;
257     }
258 #define min_bits_size 1024
259     if (bits_size < min_bits_size)
260         bits_size = min_bits_size;
261 #undef min_bits_size
262     return bits_size;
263 }
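/*
 * For instance (approximate figures): with a data_size of about 1 MB, the
 * cache gets roughly 200 KB (one fifth, rounded down to the cached-bits
 * alignment); if the device does not need halftoning this is reduced by a
 * quarter to roughly 150 KB, and it is never allowed to fall below 1 KB.
 */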
264 
265 /*
266  * Initialize the allocation for the tile cache.  Sets: tile_hash_mask,
267  * tile_max_count, tile_table, chunk (structure), bits (structure).
268  */
269 static int
clist_init_tile_cache(gx_device * dev, byte * init_data, ulong data_size)
271 {
272     gx_device_clist_writer * const cdev =
273         &((gx_device_clist *)dev)->writer;
274     byte *data = init_data;
275     uint bits_size = data_size;
276     /*
277      * Partition the bits area between the hash table and the actual
278      * bitmaps.  The per-bitmap overhead is about 24 bytes; if the
279      * average character size is 10 points, its bitmap takes about 24 +
280      * 0.5 * 10/72 * xdpi * 10/72 * ydpi / 8 bytes (the 0.5 being a
281      * fudge factor to account for characters being narrower than they
282      * are tall), which gives us a guideline for the size of the hash
283      * table.
284      */
285     uint avg_char_size =
286         (uint)(dev->HWResolution[0] * dev->HWResolution[1] *
287                (0.5 * 10 / 72 * 10 / 72 / 8)) + 24;
288     uint hc = bits_size / avg_char_size;
289     uint hsize;
290 
291     while ((hc + 1) & hc)
292         hc |= hc >> 1;          /* make mask (power of 2 - 1) */
293     if (hc < 0xff)
294         hc = 0xff;              /* make allowance for halftone tiles */
295     else if (hc > 0xfff)
296         hc = 0xfff;             /* cmd_op_set_tile_index has 12-bit operand */
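    /* For instance, at 300x300 dpi the estimate above gives an avg_char_size
       of about 132 bytes, so a (hypothetical) 150 KB bits area yields hc of
       about 1163, which the loop above rounds up to the mask 0x7ff, i.e. a
       hash table of 2048 entries (before the fit check below). */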
297     /* Make sure the tables will fit. */
298     while (hc >= 3 && (hsize = (hc + 1) * sizeof(tile_hash)) >= bits_size)
299         hc >>= 1;
300     if (hc < 3)
301         return_error(gs_error_rangecheck);
302     cdev->tile_hash_mask = hc;
303     cdev->tile_max_count = hc - (hc >> 2);
304     cdev->tile_table = (tile_hash *) data;
305     data += hsize;
306     bits_size -= hsize;
307     gx_bits_cache_chunk_init(cdev->cache_chunk, data, bits_size);
308     gx_bits_cache_init(&cdev->bits, cdev->cache_chunk);
309     return 0;
310 }
311 
312 /*
313  * Initialize the allocation for the bands.  Requires: target.  Sets:
314  * page_band_height (=page_info.band_params.BandHeight), nbands.
315  */
316 static int
clist_init_bands(gx_device * dev, gx_device_memory *bdev, uint data_size,
318                  int band_width, int band_height)
319 {
320     gx_device_clist_writer * const cdev =
321         &((gx_device_clist *)dev)->writer;
322     int nbands;
323     ulong space;
324 
325     if (dev_proc(dev, open_device) == pattern_clist_open_device) {
326         /* We don't need bands really. */
327         cdev->page_band_height = dev->height;
328         cdev->nbands = 1;
329         return 0;
330     }
331     if (gdev_mem_data_size(bdev, band_width, band_height, &space) < 0 ||
332         space > data_size)
333         return_error(gs_error_rangecheck);
334     cdev->page_band_height = band_height;
335     nbands = (cdev->target->height + band_height - 1) / band_height;
336     cdev->nbands = nbands;
337 #ifdef DEBUG
338     if (gs_debug_c('l') | gs_debug_c(':'))
339         dmlprintf4(dev->memory, "[:]width=%d, band_width=%d, band_height=%d, nbands=%d\n",
340                    bdev->width, band_width, band_height, nbands);
341 #endif
342     return 0;
343 }
344 
345 /*
346  * Initialize the allocation for the band states, which are used only
347  * when writing.  Requires: nbands.  Sets: states, cbuf, cend.
348  */
349 static int
clist_init_states(gx_device * dev, byte * init_data, uint data_size)
351 {
352     gx_device_clist_writer * const cdev =
353         &((gx_device_clist *)dev)->writer;
354     ulong state_size = cdev->nbands * (ulong) sizeof(gx_clist_state);
355     /* Align to the natural boundary for ARM processors, bug 689600 */
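    /* sizeof(init_data) is the size of a byte pointer, so the expression below
       gives the number of bytes needed to advance init_data to the next
       pointer-size boundary (0 if it is already suitably aligned). */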
356     long alignment = (-(long)init_data) & (sizeof(init_data) - 1);
357 
358     /*
359      * The +100 in the next line is bogus, but we don't know what the
     * real check should be. We're effectively ensuring that at least 100
361      * bytes will be available to buffer command operands.
362      */
363     if (state_size + sizeof(cmd_prefix) + cmd_largest_size + 100 + alignment > data_size)
364         return_error(gs_error_rangecheck);
365     /* The end buffer position is not affected by alignment */
366     cdev->cend = init_data + data_size;
367     init_data +=  alignment;
368     cdev->states = (gx_clist_state *) init_data;
369     cdev->cbuf = init_data + state_size;
370     return 0;
371 }
372 
373 /*
374  * Initialize all the data allocations.  Requires: target.  Sets:
375  * page_tile_cache_size, page_info.band_params.BandWidth,
376  * page_info.band_params.BandBufferSpace, + see above.
377  */
378 static int
clist_init_data(gx_device * dev, byte * init_data, uint data_size)
380 {
381     gx_device_clist_writer * const cdev =
382         &((gx_device_clist *)dev)->writer;
383     gx_device *target = cdev->target;
384     /* BandWidth can't be smaller than target device width */
385     const int band_width =
386         cdev->page_info.band_params.BandWidth = max(target->width, cdev->band_params.BandWidth);
387     int band_height = cdev->band_params.BandHeight;
388     bool page_uses_transparency = cdev->page_uses_transparency;
389     const uint band_space =
390     cdev->page_info.band_params.BandBufferSpace =
391         (cdev->band_params.BandBufferSpace ?
392          cdev->band_params.BandBufferSpace : data_size);
393     byte *data = init_data;
394     uint size = band_space;
395     uint bits_size;
396     gx_device_memory bdev;
397     gx_device *pbdev = (gx_device *)&bdev;
398     int code;
399     int align = 1 << (target->log2_align_mod > log2_align_bitmap_mod ? target->log2_align_mod : log2_align_bitmap_mod);
400 
    /* The clist writer has its own color info, which depends upon the
       transparency group color space (if transparency exists); this is the
       data that is used during clist writing.  Here it is initialized with
       the target device's color info.  The values will be pushed and popped
       on a stack if we have changing color spaces in the transparency groups. */
406 
407     cdev->clist_color_info.depth = dev->color_info.depth;
408     cdev->clist_color_info.polarity = dev->color_info.polarity;
409     cdev->clist_color_info.num_components = dev->color_info.num_components;
410     cdev->graphics_type_tag = target->graphics_type_tag;	/* initialize to same as target */
411 
412     /* Call create_buf_device to get the memory planarity set up. */
413     code = cdev->buf_procs.create_buf_device(&pbdev, target, 0, NULL, NULL, NULL);
414     if (code < 0)
415         return code;
416     /* HACK - if the buffer device can't do copy_alpha, disallow */
    /* copy_alpha in the command list device as well. */
418     if (dev_proc(pbdev, copy_alpha) == gx_no_copy_alpha)
419         cdev->disable_mask |= clist_disable_copy_alpha;
420     if (dev_proc(cdev, open_device) == pattern_clist_open_device) {
421         bits_size = data_size / 2;
422         cdev->page_line_ptrs_offset = 0;
423     } else {
424         if (band_height) {
425             /*
426              * The band height is fixed, so the band buffer requirement
427              * is completely determined.
428              */
429             ulong band_data_size;
430             int adjusted;
431 
432             adjusted = (dev_proc(dev, dev_spec_op)(dev, gxdso_adjust_bandheight, NULL, band_height));
433             if (adjusted > 0)
434                 band_height = adjusted;
435 
436             if (gdev_mem_data_size(&bdev, band_width, band_height, &band_data_size) < 0 ||
437                 band_data_size >= band_space) {
438                 if (pbdev->finalize)
439                     pbdev->finalize(pbdev);
440                 return_error(gs_error_rangecheck);
441             }
442             /* If the tile_cache_size is specified, use it */
443             if (cdev->space_params.band.tile_cache_size == 0) {
444                 bits_size = min(band_space - band_data_size, data_size >> 1);
445             } else {
446                 bits_size = cdev->space_params.band.tile_cache_size;
447             }
            /* The top of the tile_cache is the bottom of the imageable band buffer,
             * which needs to be appropriately aligned. Because the band height is
             * fixed, we must round the size of the cache *down* to an appropriate
             * value. See clist_render_thread() and clist_rasterize_lines()
             * for where the value is used.
             */
454             bits_size = ROUND_DOWN(bits_size, align);
455         } else {
456             int adjusted;
457             /*
458              * Choose the largest band height that will fit in the
459              * rendering-time buffer.
460              */
461             bits_size = clist_tile_cache_size(target, band_space);
462             bits_size = min(bits_size, data_size >> 1);
            /* The top of the tile_cache is the bottom of the imageable band buffer,
             * which needs to be appropriately aligned. Because the band height is
             * variable here, we round the size of the cache *up*; the difference
             * should only be a few bytes. See clist_render_thread()
             * and clist_rasterize_lines() for where the value is used.
             */
469             bits_size = ROUND_UP(bits_size, align);
470             band_height = gdev_mem_max_height(&bdev, band_width,
471                               band_space - bits_size, page_uses_transparency);
472             if (band_height == 0) {
473                 if (pbdev->finalize)
474                     pbdev->finalize(pbdev);
475                 return_error(gs_error_rangecheck);
476             }
477             adjusted = (dev_proc(dev, dev_spec_op)(dev, gxdso_adjust_bandheight, NULL, band_height));
478             if (adjusted > 0)
479                 band_height = adjusted;
480         }
        /* The bits_size values calculated above include space for the line ptrs.
         * What is the offset of the line_ptrs within the buffer? */
483         if (gdev_mem_bits_size(&bdev, band_width, band_height, &cdev->page_line_ptrs_offset) < 0)
484             return_error(gs_error_VMerror);
485     }
486     cdev->ins_count = 0;
487     code = clist_init_tile_cache(dev, data, bits_size);
488     if (code < 0) {
489         if (pbdev->finalize)
490             pbdev->finalize(pbdev);
491         return code;
492     }
493     cdev->page_tile_cache_size = bits_size;
494     data += bits_size;
495     size -= bits_size;
496     code = clist_init_bands(dev, &bdev, size, band_width, band_height);
497     if (code < 0) {
498         if (pbdev->finalize)
499             pbdev->finalize(pbdev);
500         return code;
501     }
502 
503     if (pbdev->finalize)
504         pbdev->finalize(pbdev);
505 
506     return clist_init_states(dev, data, data_size - bits_size);
507 }
508 /*
509  * Reset the device state (for writing).  This routine requires only
510  * data, data_size, and target to be set, and is idempotent.
511  */
512 static int
clist_reset(gx_device * dev)
514 {
515     gx_device_clist_writer * const cdev =
516         &((gx_device_clist *)dev)->writer;
517     int code = clist_init_data(dev, cdev->data, cdev->data_size);
518     int nbands;
519 
520     if (code < 0)
521         return (cdev->permanent_error = code);
522     /* Now initialize the rest of the state. */
523     cdev->permanent_error = 0;
524     nbands = cdev->nbands;
525     cdev->ymin = cdev->ymax = -1;       /* render_init not done yet */
526     memset(cdev->tile_table, 0, (cdev->tile_hash_mask + 1) *
527        sizeof(*cdev->tile_table));
528     cdev->cnext = cdev->cbuf;
529     cdev->ccl = 0;
530     cdev->band_range_list.head = cdev->band_range_list.tail = 0;
531     cdev->band_range_min = 0;
532     cdev->band_range_max = nbands - 1;
533     {
534         int band;
535         gx_clist_state *states = cdev->states;
536 
537         for (band = 0; band < nbands; band++, states++) {
538             static const gx_clist_state cls_initial = { cls_initial_values };
539 
540             *states = cls_initial;
541         }
542     }
543     /*
544      * Round up the size of the per-tile band mask so that the bits,
545      * which follow it, stay aligned.
546      */
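    /* For instance, if align_bitmap_mod were 8 (a hypothetical value), 500
       bands would need ceil(500/8) = 63 mask bytes, rounded up to 64 here. */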
547     cdev->tile_band_mask_size =
548         ((nbands + (align_bitmap_mod * 8 - 1)) >> 3) &
549         ~(align_bitmap_mod - 1);
550     /*
551      * Initialize the all-band parameters to impossible values,
552      * to force them to be written the first time they are used.
553      */
554     memset(&cdev->tile_params, 0, sizeof(cdev->tile_params));
555     cdev->tile_depth = 0;
556     cdev->tile_known_min = nbands;
557     cdev->tile_known_max = -1;
558     GS_STATE_INIT_VALUES_CLIST((&cdev->gs_gstate));
559     cdev->clip_path = NULL;
560     cdev->clip_path_id = gs_no_id;
561     cdev->color_space.byte1 = 0;
562     cdev->color_space.id = gs_no_id;
563     cdev->color_space.space = 0;
564     {
565         int i;
566 
567         for (i = 0; i < countof(cdev->transfer_ids); ++i)
568             cdev->transfer_ids[i] = gs_no_id;
569     }
570     cdev->black_generation_id = gs_no_id;
571     cdev->undercolor_removal_id = gs_no_id;
572     cdev->device_halftone_id = gs_no_id;
573     cdev->image_enum_id = gs_no_id;
574     cdev->cropping_min = cdev->save_cropping_min = 0;
575     cdev->cropping_max = cdev->save_cropping_max = cdev->height;
576     cdev->cropping_saved = false;
577     cdev->cropping_stack = NULL;
578     cdev->cropping_level = 0;
579     cdev->mask_id_count = cdev->mask_id = cdev->temp_mask_id = 0;
580     cdev->icc_table = NULL;
581     cdev->op_fill_active = false;
582     cdev->op_stroke_active = false;
583     return 0;
584 }
585 /*
586  * Initialize the device state (for writing).  This routine requires only
587  * data, data_size, and target to be set, and is idempotent.
588  */
589 static int
clist_init(gx_device * dev)
591 {
592     gx_device_clist_writer * const cdev =
593         &((gx_device_clist *)dev)->writer;
594     int code = clist_reset(dev);
595 
596     if (code >= 0) {
597         cdev->image_enum_id = gs_no_id;
598         cdev->ignore_lo_mem_warnings = 0;
599     }
600     return code;
601 }
602 
603 /* Write out the current parameters that must be at the head of each page */
604 /* if async rendering is in effect */
605 static int
clist_emit_page_header(gx_device *dev)
607 {
608     gx_device_clist_writer * const cdev =
609         &((gx_device_clist *)dev)->writer;
610     int code = 0;
611 
612     if ((cdev->disable_mask & clist_disable_pass_thru_params)) {
613         code = clist_put_current_params(cdev);
614         cdev->permanent_error = (code < 0 ? code : 0);
615     }
616     return code;
617 }
618 
619 /* Reset parameters for the beginning of a page. */
620 static void
clist_reset_page(gx_device_clist_writer *cwdev)
622 {
623     cwdev->page_bfile_end_pos = 0;
624 }
625 
626 /* Open the device's bandfiles */
627 static int
clist_open_output_file(gx_device *dev)
629 {
630     gx_device_clist_writer * const cdev =
631         &((gx_device_clist *)dev)->writer;
632     char fmode[4];
633     int code;
634 
635     if (cdev->do_not_open_or_close_bandfiles)
636         return 0; /* external bandfile open/close managed externally */
637     cdev->page_cfile = 0;       /* in case of failure */
638     cdev->page_bfile = 0;       /* ditto */
639     code = clist_init(dev);
640     if (code < 0)
641         return code;
642     snprintf(fmode, sizeof(fmode), "w+%s", gp_fmode_binary_suffix);
643     cdev->page_cfname[0] = 0;   /* create a new file */
644     cdev->page_bfname[0] = 0;   /* ditto */
645     clist_reset_page(cdev);
646     if ((code = cdev->page_info.io_procs->fopen(cdev->page_cfname, fmode, &cdev->page_cfile,
647                             cdev->bandlist_memory, cdev->bandlist_memory,
648                             true)) < 0 ||
649         (code = cdev->page_info.io_procs->fopen(cdev->page_bfname, fmode, &cdev->page_bfile,
650                             cdev->bandlist_memory, cdev->bandlist_memory,
651                             false)) < 0
652         ) {
653         clist_close_output_file(dev);
654         cdev->permanent_error = code;
655     }
656     return code;
657 }
658 
659 /* Close, and free the contents of, the temporary files of a page. */
660 /* Note that this does not deallocate the buffer. */
661 int
clist_close_page_info(gx_band_page_info_t *ppi)
663 {
664     if (ppi->cfile != NULL) {
665         ppi->io_procs->fclose(ppi->cfile, ppi->cfname, true);
666         ppi->cfile = NULL;
667         ppi->cfname[0] = 0;     /* prevent re-use in case this is a fake path */
668     }
669     if (ppi->bfile != NULL) {
670         ppi->io_procs->fclose(ppi->bfile, ppi->bfname, true);
671         ppi->bfile = NULL;
672         ppi->bfname[0] = 0;     /* prevent re-use in case this is a fake path */
673     }
674     return 0;
675 }
676 
677 /* Close the device by freeing the temporary files. */
678 /* Note that this does not deallocate the buffer. */
679 int
clist_close_output_file(gx_device *dev)
681 {
682     gx_device_clist_writer * const cdev =
683         &((gx_device_clist *)dev)->writer;
684 
685     return clist_close_page_info(&cdev->page_info);
686 }
687 
688 /* Open the device by initializing the device state and opening the */
689 /* scratch files. */
690 int
clist_open(gx_device *dev)
692 {
693     gx_device_clist_writer * const cdev =
694         &((gx_device_clist *)dev)->writer;
695     bool save_is_open = dev->is_open;
696     int code;
697 
698     cdev->permanent_error = 0;
699     cdev->is_open = false;
700 
701     cdev->cache_chunk = (gx_bits_cache_chunk *)gs_alloc_bytes(cdev->memory->non_gc_memory, sizeof(gx_bits_cache_chunk), "alloc tile cache for clist");
702     if (!cdev->cache_chunk)
703         return_error(gs_error_VMerror);
704     memset(cdev->cache_chunk, 0x00, sizeof(gx_bits_cache_chunk));
705 
706     code = clist_init(dev);
707     if (code < 0)
708         goto errxit;
709 
710     cdev->icc_cache_list_len = 0;
711     cdev->icc_cache_list = NULL;
712     code = clist_open_output_file(dev);
713     if ( code >= 0)
714         code = clist_emit_page_header(dev);
715     if (code >= 0) {
716         dev->is_open = save_is_open;
717         return code;		/* success */
718     }
719     /* fall through to clean up and return error code */
720 errxit:
721     /* prevent leak */
722     gs_free_object(cdev->memory->non_gc_memory, cdev->cache_chunk, "free tile cache for clist");
723     cdev->cache_chunk = NULL;
724     return code;
725 }
726 
727 static int
clist_close(gx_device *dev)
729 {
730     int i;
731     gx_device_clist_writer * const cdev =
732         &((gx_device_clist *)dev)->writer;
733 
    /* We would like to free the cache chunk here, but we can't, because the
     * pattern clist device gets closed, but not discarded, and is run later.
     * So we have to free the memory in *2* places: once in gdev_prn_tear_down()
     * for regular clists, and once in gx_pattern_cache_free_entry() for
     * pattern clists....
     */
739     for(i = 0; i < cdev->icc_cache_list_len; i++) {
740         rc_decrement(cdev->icc_cache_list[i], "clist_close");
741     }
742     cdev->icc_cache_list_len = 0;
743     gs_free_object(cdev->memory->thread_safe_memory, cdev->icc_cache_list, "clist_close");
744     cdev->icc_cache_list = NULL;
745 
746     if (cdev->do_not_open_or_close_bandfiles)
747         return 0;
748     if (dev_proc(cdev, open_device) == pattern_clist_open_device) {
749         gs_free_object(cdev->bandlist_memory, cdev->data, "clist_close");
750         cdev->data = NULL;
751     }
752     return clist_close_output_file(dev);
753 }
754 
755 /* The output_page procedure should never be called! */
756 int
clist_output_page(gx_device * dev, int num_copies, int flush)
758 {
759     return_error(gs_error_Fatal);
760 }
761 
762 /* Reset (or prepare to append to) the command list after printing a page. */
763 int
clist_finish_page(gx_device *dev, bool flush)
765 {
766     gx_device_clist_writer *const cdev = &((gx_device_clist *)dev)->writer;
767     int code;
768 
    /* If this is a reader clist that is about to be reset to a writer,
     * free any color_usage array it used.
     * Since we have been rendering, shut down the render threads.
     * Also free the icc_table and the icc_cache at this time.
     */
774     if (!CLIST_IS_WRITER((gx_device_clist *)dev)) {
775         gx_device_clist_reader * const crdev =  &((gx_device_clist *)dev)->reader;
776 
777         clist_teardown_render_threads(dev);
778         gs_free_object(cdev->memory, crdev->color_usage_array, "clist_color_usage_array");
779         crdev->color_usage_array = NULL;
780 
781        /* Free the icc table associated with this device.
782            The threads that may have pointed to this were destroyed in
783            the above call to clist_teardown_render_threads.  Since they
784            all maintained a copy of the cache and the table there should not
785            be any issues. */
786         clist_free_icc_table(crdev->icc_table, crdev->memory);
787         crdev->icc_table = NULL;
788     }
789     if (flush) {
790         if (cdev->page_cfile != 0) {
791             code = cdev->page_info.io_procs->rewind(cdev->page_cfile, true, cdev->page_cfname);
792             if (code < 0) return code;
793         }
794         if (cdev->page_bfile != 0) {
795             code = cdev->page_info.io_procs->rewind(cdev->page_bfile, true, cdev->page_bfname);
796             if (code < 0) return code;
797         }
798         cdev->page_info.bfile_end_pos = 0;
799         clist_reset_page(cdev);
800     } else {
801         if (cdev->page_cfile != 0)
802             cdev->page_info.io_procs->fseek(cdev->page_cfile, 0L, SEEK_END, cdev->page_cfname);
803         if (cdev->page_bfile != 0)
804             cdev->page_info.io_procs->fseek(cdev->page_bfile, 0L, SEEK_END, cdev->page_bfname);
805     }
806     code = clist_init(dev);             /* reinitialize */
807     if (code >= 0)
808         code = clist_emit_page_header(dev);
809 
810     return code;
811 }
812 
813 /* ------ Writing ------ */
814 
815 /* End a page by flushing the buffer and terminating the command list. */
816 int     /* ret 0 all-ok, -ve error code, or +1 ok w/low-mem warning */
clist_end_page(gx_device_clist_writer * cldev)
818 {
819     int code;
820     cmd_block cb;
821     int ecode = 0;
822 
823     code = cmd_write_buffer(cldev, cmd_opv_end_page);
824     if (code >= 0)
825         ecode |= code;
826     else
827         ecode = code;
828 
    /* If we have ICC profiles present in the cfile, save the table now,
       along with the ICC profiles. The table is stored in band maxband + 1. */
831     if ( cldev->icc_table != NULL ) {
832         /* Save the table */
833         code = clist_icc_writetable(cldev);
834         /* Free the table */
835         clist_free_icc_table(cldev->icc_table, cldev->memory);
836         cldev->icc_table = NULL;
837     }
838     if (code >= 0) {
839         code = clist_write_color_usage_array(cldev);
840         if (code >= 0) {
841             ecode |= code;
842             /*
843              * Write the terminating entry in the block file.
844              * Note that because of copypage, there may be many such entries.
845              */
846             memset(&cb, 0, sizeof(cb)); /* Zero the block, including any padding */
847             cb.band_min = cb.band_max = cmd_band_end;
848             cb.pos = (cldev->page_cfile == 0 ? 0 : cldev->page_info.io_procs->ftell(cldev->page_cfile));
849             code = cldev->page_info.io_procs->fwrite_chars(&cb, sizeof(cb), cldev->page_bfile);
850             if (code > 0)
851                 code = 0;
852         }
853     }
854     if (code >= 0) {
855         ecode |= code;
856         cldev->page_bfile_end_pos = cldev->page_info.io_procs->ftell(cldev->page_bfile);
857     } else
858         ecode = code;
859 
860     /* Reset warning margin to 0 to release reserve memory if mem files */
861     if (cldev->page_bfile != 0)
862         cldev->page_info.io_procs->set_memory_warning(cldev->page_bfile, 0);
863     if (cldev->page_cfile != 0)
864         cldev->page_info.io_procs->set_memory_warning(cldev->page_cfile, 0);
865 
866 #ifdef DEBUG
867     if (gs_debug_c('l') | gs_debug_c(':')) {
868         if (cb.pos <= 0xFFFFFFFF)
869             dmlprintf2(cldev->memory, "[:]clist_end_page at cfile=%lu, bfile=%lu\n",
870                   (unsigned long)cb.pos, (unsigned long)cldev->page_bfile_end_pos);
871         else
872             dmlprintf3(cldev->memory, "[:]clist_end_page at cfile=%lu%0lu, bfile=%lu\n",
873                 (unsigned long) (cb.pos >> 32), (unsigned long) (cb.pos & 0xFFFFFFFF),
874                 (unsigned long)cldev->page_bfile_end_pos);
875     }
876 #endif
877     if (cldev->page_uses_transparency && gs_debug[':']) {
878         /* count how many bands were skipped */
879         int skip_count = 0;
880         int band;
881 
882         for (band=0; band < cldev->nbands - 1; band++) {
883             if (cldev->states[band].color_usage.trans_bbox.p.y >
884                 cldev->states[band].color_usage.trans_bbox.q.y)
885                 skip_count++;
886         }
887         dprintf2("%d bands skipped out of %d\n", skip_count, cldev->nbands);
888     }
889 
890     return ecode;
891 }
892 
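/* Convert a color index into a mask of the colorant components it touches.
 * For additive devices the index is complemented first, so that white maps
 * to 0 (i.e. "no colorant usage"). */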
893 gx_color_usage_bits
gx_color_index2usage(gx_device *dev, gx_color_index color)
895 {
896     gx_color_usage_bits bits = 0;
897     uchar i;
898 
899     if (dev->color_info.polarity == GX_CINFO_POLARITY_ADDITIVE)
900         color = color ^ ~0;		/* white is 0 */
901 
902     for (i = 0; i < dev->color_info.num_components; i++) {
903         if (color & dev->color_info.comp_mask[i])
904             bits |= (1<<i);
905     }
906     return bits;
907 }
908 
909 /* Write the target device's current parameter list */
910 static int      /* ret 0 all ok, -ve error */
clist_put_current_params(gx_device_clist_writer *cldev)
912 {
913     gx_device *target = cldev->target;
914     gs_c_param_list param_list;
915     int code;
916 
917     /*
918      * If a put_params call fails, the device will be left in a closed
919      * state, but higher-level code won't notice this fact.  We flag this by
920      * setting permanent_error, which prevents writing to the command list.
921      */
922 
923     if (cldev->permanent_error)
924         return cldev->permanent_error;
925     gs_c_param_list_write(&param_list, cldev->memory);
926     code = (*dev_proc(target, get_params))
927         (target, (gs_param_list *)&param_list);
928     if (code >= 0) {
929         gs_c_param_list_read(&param_list);
930         code = cmd_put_params( cldev, (gs_param_list *)&param_list );
931     }
932     gs_c_param_list_release(&param_list);
933 
934     return code;
935 }
936 
937 /* ---------------- Driver interface ---------------- */
938 
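/* Return the band parameters for a given y value: *band_start is set to the
 * first scan line of the band containing y, and the return value is the
 * number of lines in that band (possibly fewer for the last band on the page). */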
939 static int
clist_get_band(gx_device * dev, int y, int *band_start)
941 {
942     gx_device_clist_writer * const cdev =
943         &((gx_device_clist *)dev)->writer;
944     int band_height = cdev->page_band_height;
945     int start;
946 
947     if (y < 0)
948         y = 0;
949     else if (y >= dev->height)
950         y = dev->height;
951     *band_start = start = y - y % band_height;
952     return min(dev->height - start, band_height);
953 }
954 
955 /* ICC table operations.  See gxclist.h for details */
956 /* This checks the table for a hash code entry */
957 bool
clist_icc_searchtable(gx_device_clist_writer *cdev, int64_t hashcode)
959 {
960     clist_icctable_t *icc_table = cdev->icc_table;
961     clist_icctable_entry_t *curr_entry;
962 
963     if (icc_table == NULL)
964         return(false);  /* No entry */
965     curr_entry = icc_table->head;
966     while(curr_entry != NULL) {
967         if (curr_entry->serial_data.hashcode == hashcode){
968             return(true);
969         }
970         curr_entry = curr_entry->next;
971     }
972      return(false);  /* No entry */
973 }
974 
975 static void
clist_free_icc_table_contents(clist_icctable_t *icc_table)
977 {
978     int number_entries;
979     clist_icctable_entry_t *curr_entry, *next_entry;
980     int k;
981 
982     number_entries = icc_table->tablesize;
983     curr_entry = icc_table->head;
984     for (k = 0; k < number_entries; k++) {
985         next_entry = curr_entry->next;
986         gsicc_adjust_profile_rc(curr_entry->icc_profile, -1, "clist_free_icc_table");
987         gs_free_object(icc_table->memory, curr_entry, "clist_free_icc_table");
988         curr_entry = next_entry;
989     }
990 }
991 
992 void
clist_icc_table_finalize(const gs_memory_t *memory, void * vptr)
994 {
995     clist_icctable_t *icc_table = (clist_icctable_t *)vptr;
996 
997     clist_free_icc_table_contents(icc_table);
998 }
999 
1000 /* Free the table */
1001 int
clist_free_icc_table(clist_icctable_t *icc_table, gs_memory_t *memory)
1003 {
1004     if (icc_table == NULL)
1005         return(0);
1006 
1007     gs_free_object(icc_table->memory, icc_table, "clist_free_icc_table");
1008     return(0);
1009 }
1010 
1011 /* This serializes the ICC table and writes it out for maxband+1 */
1012 int
clist_icc_writetable(gx_device_clist_writer *cldev)
1014 {
1015     unsigned char *pbuf, *buf;
1016     clist_icctable_t *icc_table = cldev->icc_table;
1017     int number_entries = icc_table->tablesize;
1018     clist_icctable_entry_t *curr_entry;
1019     int size_data;
1020     int k;
1021     bool rend_is_valid;
1022 
    /* First we need to write out the ICC profiles themselves and record
       in the table where they are stored and how large they are.  Set the
       rend cond valid flag prior to writing. */
1026     curr_entry = icc_table->head;
1027     for ( k = 0; k < number_entries; k++ ){
1028         rend_is_valid = curr_entry->icc_profile->rend_is_valid;
1029         curr_entry->icc_profile->rend_is_valid = curr_entry->render_is_valid;
1030         curr_entry->serial_data.file_position = clist_icc_addprofile(cldev, curr_entry->icc_profile, &size_data);
1031         curr_entry->icc_profile->rend_is_valid = rend_is_valid;
1032         curr_entry->serial_data.size = size_data;
1033         gsicc_adjust_profile_rc(curr_entry->icc_profile, -1, "clist_icc_writetable");
1034         curr_entry->icc_profile = NULL;
1035         curr_entry = curr_entry->next;
1036     }
1037 
1038     /* Now serialize the table data */
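    /* The serialized form is simply the entry count followed by one fixed-size
       clist_icc_serial_entry_t (hashcode, file position, size) per entry; the
       profile data itself was written to the cfile above. */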
1039     size_data = number_entries*sizeof(clist_icc_serial_entry_t) + sizeof(number_entries);
1040     buf = gs_alloc_bytes(cldev->memory, size_data, "clist_icc_writetable");
1041     if (buf == NULL)
1042         return gs_rethrow(-1, "insufficient memory for icc table buffer");
1043     pbuf = buf;
1044     memcpy(pbuf, &number_entries, sizeof(number_entries));
1045     pbuf += sizeof(number_entries);
1046     curr_entry = icc_table->head;
1047     for (k = 0; k < number_entries; k++) {
1048         memcpy(pbuf, &(curr_entry->serial_data), sizeof(clist_icc_serial_entry_t));
1049         pbuf += sizeof(clist_icc_serial_entry_t);
1050         curr_entry = curr_entry->next;
1051     }
1052     /* Now go ahead and save the table data */
1053     cmd_write_pseudo_band(cldev, buf, size_data, ICC_TABLE_OFFSET);
1054     gs_free_object(cldev->memory, buf, "clist_icc_writetable");
1055     return(0);
1056 }
1057 
/* This writes the actual profile data out to the cfile */
1059 
1060 int64_t
clist_icc_addprofile(gx_device_clist_writer *cldev, cmm_profile_t *iccprofile, int *size)
1062 {
1063 
1064     clist_file_ptr cfile = cldev->page_cfile;
1065     int64_t fileposit;
1066 #if defined(DEBUG) || defined(PACIFY_VALGRIND)
1067     gsicc_serialized_profile_t profile_data = { 0 };
1068 #else
1069     gsicc_serialized_profile_t profile_data;
1070 #endif
1071     int count1, count2;
1072 
1073     /* Get the current position */
1074     fileposit = cldev->page_info.io_procs->ftell(cfile);
1075     /* Get the serialized header */
1076     gsicc_profile_serialize(&profile_data, iccprofile);
1077     /* Write the header */
1078     if_debug1m('l', cldev->memory, "[l]writing icc profile in cfile at pos %"PRId64"\n",fileposit);
1079     count1 = cldev->page_info.io_procs->fwrite_chars(&profile_data, GSICC_SERIALIZED_SIZE, cfile);
1080     /* Now write the profile */
1081     count2 = cldev->page_info.io_procs->fwrite_chars(iccprofile->buffer, iccprofile->buffer_size, cfile);
1082     /* Return where we wrote this in the cfile */
1083     *size = count1 + count2;
1084     return(fileposit);
1085 }
1086 
/* This adds a new entry to the table */
1088 
1089 int
clist_icc_addentry(gx_device_clist_writer *cdev, int64_t hashcode_in, cmm_profile_t *icc_profile)
1091 {
1092 
1093     clist_icctable_t *icc_table = cdev->icc_table;
1094     clist_icctable_entry_t *entry, *curr_entry;
1095     int k;
1096     int64_t hashcode;
1097     gs_memory_t *stable_mem = cdev->memory->stable_memory;
1098 
1099     /* If the hash code is not valid then compute it now */
1100     if (icc_profile->hash_is_valid == false) {
1101         gsicc_get_icc_buff_hash(icc_profile->buffer, &hashcode,
1102                                 icc_profile->buffer_size);
1103         icc_profile->hashcode = hashcode;
1104         icc_profile->hash_is_valid = true;
1105     } else {
1106         hashcode = hashcode_in;
1107     }
1108     if ( icc_table == NULL ) {
1109         entry = (clist_icctable_entry_t *) gs_alloc_struct(stable_mem,
1110                                clist_icctable_entry_t, &st_clist_icctable_entry,
1111                                "clist_icc_addentry");
1112         if (entry == NULL)
1113             return gs_rethrow(-1, "insufficient memory to allocate entry in icc table");
1114 #ifdef PACIFY_VALGRIND
1115         /* Avoid uninitialised padding upsetting valgrind when it's written
1116          * into the clist. */
1117         memset(entry, 0, sizeof(*entry));
1118 #endif
1119         entry->next = NULL;
1120         entry->serial_data.hashcode = hashcode;
1121         entry->serial_data.size = -1;
1122         entry->serial_data.file_position = -1;
1123         entry->icc_profile = icc_profile;
1124         entry->render_is_valid = icc_profile->rend_is_valid;
1125         gsicc_adjust_profile_rc(icc_profile, 1, "clist_icc_addentry");
1126         icc_table = gs_alloc_struct(stable_mem, clist_icctable_t,
1127                                     &st_clist_icctable, "clist_icc_addentry");
1128         if (icc_table == NULL)
1129             return gs_rethrow(-1, "insufficient memory to allocate icc table");
1130         icc_table->tablesize = 1;
1131         icc_table->head = entry;
1132         icc_table->final = entry;
1133         icc_table->memory = stable_mem;
1134         /* For now, we are just going to put the icc_table itself
1135             at band_range_max + 1.  The ICC profiles are written
1136             in the cfile at the current stored file position*/
1137         cdev->icc_table = icc_table;
1138     } else {
1139         /* First check if we already have this entry */
1140         curr_entry = icc_table->head;
1141         for (k = 0; k < icc_table->tablesize; k++) {
1142             if (curr_entry->serial_data.hashcode == hashcode)
1143                 return 0;  /* A hit */
1144             curr_entry = curr_entry->next;
1145         }
1146          /* Add a new ICC profile */
1147         entry =
1148             (clist_icctable_entry_t *) gs_alloc_struct(icc_table->memory,
1149                                                        clist_icctable_entry_t,
1150                                                        &st_clist_icctable_entry,
1151                                                        "clist_icc_addentry");
1152         if (entry == NULL)
1153             return gs_rethrow(-1, "insufficient memory to allocate entry in icc table");
1154 #ifdef PACIFY_VALGRIND
1155         /* Avoid uninitialised padding upsetting valgrind when it's written
1156          * into the clist. */
1157         memset(entry, 0, sizeof(*entry));
1158 #endif
1159         entry->next = NULL;
1160         entry->serial_data.hashcode = hashcode;
1161         entry->serial_data.size = -1;
1162         entry->serial_data.file_position = -1;
1163         entry->icc_profile = icc_profile;
1164         entry->render_is_valid = icc_profile->rend_is_valid;
1165         gsicc_adjust_profile_rc(icc_profile, 1, "clist_icc_addentry");
1166         icc_table->final->next = entry;
1167         icc_table->final = entry;
1168         icc_table->tablesize++;
1169     }
1170     return(0);
1171 }
1172 
1173 /* This writes out the color_usage_array for maxband+1 */
1174 int
clist_write_color_usage_array(gx_device_clist_writer *cldev)
1176 {
1177    gx_color_usage_t *color_usage_array;
1178    int i, size_data = cldev->nbands * sizeof(gx_color_usage_t);
1179 
1180     /* Now serialize the table data */
1181     color_usage_array = (gx_color_usage_t *)gs_alloc_bytes(cldev->memory, size_data,
1182                                        "clist_write_color_usage_array");
1183     if (color_usage_array == NULL)
1184         return gs_rethrow(-1, "insufficient memory for color_usage_array");
1185     for (i = 0; i < cldev->nbands; i++) {
1186         memcpy(&(color_usage_array[i]), &(cldev->states[i].color_usage), sizeof(gx_color_usage_t));
1187     }
1188     /* Now go ahead and save the table data */
1189     cmd_write_pseudo_band(cldev, (unsigned char *)color_usage_array,
1190                           size_data, COLOR_USAGE_OFFSET);
1191     gs_free_object(cldev->memory, color_usage_array, "clist_write_color_usage_array");
1192     return(0);
1193 }
1194 
1195 /* Compute color_usage over a Y range while writing clist */
1196 /* Sets color_usage fields and range_start.               */
1197 /* Returns range end (max dev->height)                    */
1198 /* NOT expected to be used. */
1199 int
clist_writer_color_usage(gx_device_clist_writer *cldev, int y, int height,
1201                      gx_color_usage_t *color_usage, int *range_start)
1202 {
1203         gx_color_usage_bits or = 0;
1204         bool slow_rop = false;
1205         int i, band_height = cldev->page_band_height;
1206         int start = y / band_height, end = (y + height) / band_height;
1207 
1208         for (i = start; i < end; ++i) {
1209             or |= cldev->states[i].color_usage.or;
1210             slow_rop |= cldev->states[i].color_usage.slow_rop;
1211         }
1212         color_usage->or = or;
1213         color_usage->slow_rop = slow_rop;
1214         *range_start = start * band_height;
1215         return min(end * band_height, cldev->height) - *range_start;
1216 }
1217 
1218 int
clist_writer_push_no_cropping(gx_device_clist_writer *cdev)
1220 {
1221     clist_writer_cropping_buffer_t *buf = gs_alloc_struct(cdev->memory,
1222                 clist_writer_cropping_buffer_t,
1223                 &st_clist_writer_cropping_buffer, "clist_writer_transparency_push");
1224 
1225     if (buf == NULL)
1226         return_error(gs_error_VMerror);
1227     if_debug4m('v', cdev->memory, "[v]push cropping[%d], min=%d, max=%d, buf=%p\n",
1228                cdev->cropping_level, cdev->cropping_min, cdev->cropping_max, buf);
1229     buf->next = cdev->cropping_stack;
1230     cdev->cropping_stack = buf;
1231     buf->cropping_min = cdev->cropping_min;
1232     buf->cropping_max = cdev->cropping_max;
1233     buf->mask_id = cdev->mask_id;
1234     buf->temp_mask_id = cdev->temp_mask_id;
1235     cdev->cropping_level++;
1236     return 0;
1237 }
1238 
1239 int
clist_writer_push_cropping(gx_device_clist_writer *cdev, int ry, int rheight)
1241 {
1242     int code = clist_writer_push_no_cropping(cdev);
1243 
1244     if (code < 0)
1245         return 0;
1246     cdev->cropping_min = max(cdev->cropping_min, ry);
1247     cdev->cropping_max = min(cdev->cropping_max, ry + rheight);
1248     return 0;
1249 }
1250 
1251 int
clist_writer_pop_cropping(gx_device_clist_writer *cdev)
1253 {
1254     clist_writer_cropping_buffer_t *buf = cdev->cropping_stack;
1255 
1256     if (buf == NULL)
1257         return_error(gs_error_unregistered); /*Must not happen. */
1258     cdev->cropping_min = buf->cropping_min;
1259     cdev->cropping_max = buf->cropping_max;
1260     cdev->mask_id = buf->mask_id;
1261     cdev->temp_mask_id = buf->temp_mask_id;
1262     cdev->cropping_stack = buf->next;
1263     cdev->cropping_level--;
1264     if_debug4m('v', cdev->memory, "[v]pop cropping[%d] min=%d, max=%d, buf=%p\n",
1265                cdev->cropping_level, cdev->cropping_min, cdev->cropping_max, buf);
1266     gs_free_object(cdev->memory, buf, "clist_writer_transparency_pop");
1267     return 0;
1268 }
1269 
1270 int
clist_writer_check_empty_cropping_stack(gx_device_clist_writer *cdev)
1272 {
1273     if (cdev->cropping_stack != NULL) {
1274         if_debug1m('v', cdev->memory, "[v]Error: left %d cropping(s)\n", cdev->cropping_level);
1275         return_error(gs_error_unregistered); /* Must not happen */
1276     }
1277     return 0;
1278 }
1279 
1280 /* Retrieve total size for cfile and bfile. */
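/* Here (and in clist_get_data / clist_put_data below) select == 0 refers to
 * the bfile (band index blocks) and any non-zero value to the cfile (command
 * data). */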
int clist_data_size(const gx_device_clist *cdev, int select)
1282 {
1283     const gx_band_page_info_t *pinfo = &cdev->common.page_info;
1284     clist_file_ptr pfile = (!select ? pinfo->bfile : pinfo->cfile);
1285     const char *fname = (!select ? pinfo->bfname : pinfo->cfname);
1286     int code, size;
1287 
1288     code = pinfo->io_procs->fseek(pfile, 0, SEEK_END, fname);
1289     if (code < 0)
1290         return_error(gs_error_unregistered); /* Must not happen. */
1291     code = pinfo->io_procs->ftell(pfile);
1292     if (code < 0)
1293         return_error(gs_error_unregistered); /* Must not happen. */
1294     size = code;
1295     return size;
1296 }
1297 
1298 /* Get command list data. */
1299 int
clist_get_data(const gx_device_clist *cdev, int select, int64_t offset, byte *buf, int length)
1301 {
1302     const gx_band_page_info_t *pinfo = &cdev->common.page_info;
1303     clist_file_ptr pfile = (!select ? pinfo->bfile : pinfo->cfile);
1304     const char *fname = (!select ? pinfo->bfname : pinfo->cfname);
1305     int code;
1306 
1307     code = pinfo->io_procs->fseek(pfile, offset, SEEK_SET, fname);
1308     if (code < 0)
1309         return_error(gs_error_unregistered); /* Must not happen. */
1310     /* This assumes that fread_chars doesn't return prematurely
1311        when the buffer is not fully filled and the end of stream is not reached. */
1312     return pinfo->io_procs->fread_chars(buf, length, pfile);
1313 }
1314 
1315 /* Put command list data. */
1316 int
clist_put_data(const gx_device_clist *cdev, int select, int64_t offset, const byte *buf, int length)
1318 {
1319     const gx_band_page_info_t *pinfo = &cdev->common.page_info;
1320     clist_file_ptr pfile = (!select ? pinfo->bfile : pinfo->cfile);
1321     int64_t code;
1322 
1323     code = pinfo->io_procs->ftell(pfile);
1324     if (code < 0)
1325         return_error(gs_error_unregistered); /* Must not happen. */
1326     if (code != offset) {
1327         /* Assuming a consecutive writing only. */
1328         return_error(gs_error_unregistered); /* Must not happen. */
1329     }
1330     /* This assumes that fwrite_chars doesn't return prematurely
1331        when the buffer is not fully written, except with an error. */
1332     return pinfo->io_procs->fwrite_chars(buf, length, pfile);
1333 }
1334 
1335 gx_device_clist *
clist_make_accum_device(gx_device *target, const char *dname, void *base, int space,
1337                         gx_device_buf_procs_t *buf_procs, gx_band_params_t *band_params,
1338                         bool use_memory_clist, bool uses_transparency,
1339                         gs_pattern1_instance_t *pinst)
1340 {
1341         gs_memory_t *mem = target->memory;
1342         gx_device_clist *cdev = gs_alloc_struct(mem, gx_device_clist,
1343                         &st_device_clist, "clist_make_accum_device");
1344         gx_device_clist_writer *cwdev = (gx_device_clist_writer *)cdev;
1345 
1346         if (cdev == 0)
1347             return 0;
1348         memset(cdev, 0, sizeof(*cdev));
1349         cwdev->params_size = sizeof(gx_device_clist);
1350         cwdev->static_procs = NULL;
1351         cwdev->dname = dname;
1352         cwdev->memory = mem;
1353         cwdev->stype = &st_device_clist;
1354         cwdev->stype_is_dynamic = false;
1355         rc_init(cwdev, mem, 1);
1356         cwdev->retained = true;
1357         cwdev->is_open = false;
1358         cwdev->color_info = target->color_info;
1359         cwdev->pinst = pinst;
1360         cwdev->cached_colors = target->cached_colors;
1361         if (pinst != NULL) {
1362             cwdev->width = pinst->size.x;
1363             cwdev->height = pinst->size.y;
1364             cwdev->band_params.BandHeight = pinst->size.y;
1365         } else {
1366             cwdev->width = target->width;
1367             cwdev->height = target->height;
1368         }
1369         cwdev->LeadingEdge = target->LeadingEdge;
1370         cwdev->is_planar = target->is_planar;
1371         cwdev->HWResolution[0] = target->HWResolution[0];
1372         cwdev->HWResolution[1] = target->HWResolution[1];
1373         cwdev->icc_cache_cl = NULL;
1374         cwdev->icc_table = NULL;
1375         cwdev->UseCIEColor = target->UseCIEColor;
1376         cwdev->LockSafetyParams = true;
1377         cwdev->procs = gs_clist_device_procs;
1378         gx_device_copy_color_params((gx_device *)cwdev, target);
1379         rc_assign(cwdev->target, target, "clist_make_accum_device");
1380         clist_init_io_procs(cdev, use_memory_clist);
1381         cwdev->data = base;
1382         cwdev->data_size = space;
1383         memcpy (&(cwdev->buf_procs), buf_procs, sizeof(gx_device_buf_procs_t));
1384         cwdev->page_uses_transparency = uses_transparency;
1385         cwdev->band_params.BandWidth = cwdev->width;
1386         cwdev->band_params.BandBufferSpace = 0;
1387         cwdev->do_not_open_or_close_bandfiles = false;
1388         cwdev->bandlist_memory = mem->non_gc_memory;
1389         set_dev_proc(cwdev, get_clipping_box, gx_default_get_clipping_box);
1390         set_dev_proc(cwdev, get_profile, gx_forward_get_profile);
1391         set_dev_proc(cwdev, set_graphics_type_tag, gx_forward_set_graphics_type_tag);
1392         cwdev->graphics_type_tag = target->graphics_type_tag;		/* initialize to same as target */
1393         cwdev->interpolate_control = target->interpolate_control;	/* initialize to same as target */
1394 
1395         /* to be set by caller: cwdev->finalize = finalize; */
1396 
1397         /* Fields left zeroed :
1398             int   max_fill_band;
1399             int   is_printer;
1400             float MediaSize[2];
1401             float ImagingBBox[4];
1402             bool  ImagingBBox_set;
1403             float Margins[2];
1404             float HWMargins[4];
1405             long  PageCount;
1406             long  ShowpageCount;
1407             int   NumCopies;
1408             bool  NumCopies_set;
1409             bool  IgnoreNumCopies;
1410             int   disable_mask;
1411             gx_page_device_procs page_procs;
1412 
1413         */
1414         return cdev;
1415 }
1416