/* Copyright (C) 2001-2012 Artifex Software, Inc.
   All Rights Reserved.

   This software is provided AS-IS with no warranty, either express or
   implied.

   This software is distributed under license and may not be copied,
   modified or distributed except as expressly authorized under the terms
   of the license contained in the file LICENSE in this distribution.

   Refer to licensing information at http://www.artifex.com or contact
   Artifex Software, Inc.,  7 Mt. Lassen Drive - Suite A-134, San Rafael,
   CA  94903, U.S.A., +1(415)492-9861, for further information.
*/


/* Generic asynchronous printer driver support */

/* Initial version 2/1/98 by John Desrosiers (soho@crl.com) */
/* Revised 8/7/98 by L. Peter Deutsch (ghost@aladdin.com) for */
/*   memory manager changes */
/* 12/1/98 soho@crl.com - Removed unnecessary flush & reopen in */
/*         gdev_prn_async_write_get_hardware_params */
#include "gdevprna.h"
#include "gsalloc.h"
#include "gsdevice.h"
#include "gsmemlok.h"
#include "gsmemret.h"
#include "gsnogc.h"
#include "gxcldev.h"
#include "gxclpath.h"
#include "gxpageq.h"
#include "gzht.h"		/* for gx_ht_cache_default_bits_size */

/* ----------------- Constants ----------------------- */
/*
 * Fixed overhead # bytes to run renderer in (+ driver-spec'd variable bytes):
 * empirical & still very subject to change.
 */
#define RendererAllocationOverheadBytes 503000 /* minimum is 503,000 as of 4/26/99 */

#ifdef DEBUG
/* 196000 is pretty much the minimum, given 16K phys memfile blocks */
/*# define DebugBandlistMemorySize 196000*/ /* comment out to disable fixed (debug) bandlist size */
#endif /* defined(DEBUG) */

/* ---------------- Standard device procedures ---------------- */
static dev_proc_close_device(gdev_prn_async_write_close_device);
static dev_proc_output_page(gdev_prn_async_write_output_page);
static dev_proc_put_params(gdev_prn_async_write_put_params);
static dev_proc_get_hardware_params(gdev_prn_async_write_get_hardware_params);
static dev_proc_put_params(gdev_prn_async_render_put_params);

/* ---------------- Forward Declarations ---------------------- */
static void gdev_prn_dealloc(gx_device_printer *);
static proc_free_up_bandlist_memory(gdev_prn_async_write_free_up_bandlist_memory);
static int flush_page(gx_device_printer *, bool);
static int reopen_clist_after_flush(gx_device_printer *);
static void reinit_printer_into_printera(gx_device_printer * const);
static int alloc_bandlist_memory(gs_memory_t **, gs_memory_t *);
static void free_bandlist_memory(gs_memory_t *);
static int alloc_render_memory(gs_memory_t **, gs_memory_t *, long);
static void free_render_memory(gs_memory_t *);
static gs_memory_recover_status_t
    prna_mem_recover(gs_memory_retrying_t *rmem, void *proc_data);

/* ------ Open/close ------ */

/*
 * Open this printer device in ASYNC (overlapped) mode.
 * This routine must always be called by the concrete device's xx_open routine
 * in lieu of gdev_prn_open.
 */
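/*
 * Illustrative sketch (not part of this driver API; the "xyz" names and the
 * sizing figures are hypothetical): a concrete device's xx_open routine
 * might call this along the lines of
 *
 *	static int
 *	xyz_open(gx_device *pdev)
 *	{
 *	    gx_device_printer *const ppdev = (gx_device_printer *)pdev;
 *	    int max_raster = (pdev->width * pdev->color_info.depth + 7) / 8;
 *
 *	    return gdev_prn_async_write_open(ppdev, max_raster,
 *	                                     1, max_raster);
 *	}
 *
 * where printer_procs.start_render_thread has already been filled in by
 * the driver (see gdev_prn_async_render_thread below).
 */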
int
gdev_prn_async_write_open(gx_device_printer * pwdev, int max_raster,
                          int min_band_height, int max_src_image_row)
{
    gx_device *const pdev = (gx_device *) pwdev;
    int code;
    bool writer_is_open = false;
    gx_device_clist_writer *const pcwdev =
        &((gx_device_clist *) pwdev)->writer;
    gx_device_clist_reader *pcrdev = 0;
    gx_device_printer *prdev = 0;
    gs_memory_t *render_memory = 0;	/* renderer's mem allocator */

    pwdev->page_queue = 0;
    pwdev->bandlist_memory = 0;
    pwdev->async_renderer = 0;

    /* allocate & init render memory */
    /* The big memory consumers are: */
    /* - the buffer used to read images from the command list */
    /* - buffer used by gx_real_default_strip_copy_rop() */
    /* - line pointer tables for memory devices used in plane extraction */
    /* - the halftone cache */
    /* - the band rendering buffer */
    /* The * 2's in the next statement are a ****** HACK ****** to deal with */
    /* sandbars in the memory manager. */
    if ((code = alloc_render_memory(&render_memory,
            pwdev->memory->non_gc_memory, RendererAllocationOverheadBytes + max_raster
                                    /* the first * 2 is not a hack */
                   + (max_raster + sizeof(void *) * 2) * min_band_height
                   + max_src_image_row + gx_ht_cache_default_bits_size() * 2)) < 0)
             goto open_err;

    /* Alloc & init bandlist allocators */
    /* Bandlist mem is threadsafe & common to rdr/wtr, so it's used */
    /* for page queue & cmd list buffers. */
    if ((code = alloc_bandlist_memory
         (&pwdev->bandlist_memory, pwdev->memory->non_gc_memory)) < 0)
        goto open_err;

    /* Dictate banding parameters for both renderer & writer */
    /* Protect from user change, since user changing these won't be */
    /* detected, ergo the necessary close/reallocate/open wouldn't happen. */
    pwdev->space_params.banding_type = BandingAlways;
    pwdev->space_params.params_are_read_only = true;

    /* Make a copy of device for use as rendering device before opening writer */
    code = gs_copydevice((gx_device **) & prdev, pdev, render_memory);
    pcrdev = &((gx_device_clist *) prdev)->reader;
    if (code < 0)
        goto open_err;

    /* -------------- Open cmd list WRITER instance of device ------- */
    /* --------------------------------------------------------------- */
    /* This is wrong, because it causes the same thing in the renderer */
    pwdev->OpenOutputFile = 0;	/* Don't open output file in writer */

    /* Hack: set this vector to tell gdev_prn_open to allocate for async rendering */
    pwdev->free_up_bandlist_memory = &gdev_prn_async_write_free_up_bandlist_memory;

    /* prevent clist writer from queuing path graphics & force it to split images */
    pwdev->clist_disable_mask |= clist_disable_fill_path |
        clist_disable_stroke_path | clist_disable_complex_clip |
        clist_disable_nonrect_hl_image | clist_disable_pass_thru_params;

    if ((code = gdev_prn_open(pdev)) >= 0) {
        writer_is_open = true;

        /* set up constant async-specific fields in device */
        reinit_printer_into_printera(pwdev);

        /* keep ptr to renderer device */
        pwdev->async_renderer = prdev;

        /* Allocate the page queue, then initialize it */
        /* Use bandlist memory since it's shared between rdr & wtr */
        if ((pwdev->page_queue = gx_page_queue_alloc(pwdev->bandlist_memory)) == 0)
            code = gs_note_error(gs_error_VMerror);
        else
            /* Allocate from clist allocator since it is thread-safe */
            code = gx_page_queue_init(pwdev->page_queue, pwdev->bandlist_memory);
    }
    /* ------------ Open cmd list RENDERER instance of device ------- */
    /* --------------------------------------------------------------- */
    if (code >= 0) {
        gx_semaphore_t *open_semaphore;

        /* Force writer's actual band params into reader's requested params */
        prdev->space_params.band = pcwdev->page_info.band_params;

        /* copydevice has already set up prdev->memory = render_memory */
        /* prdev->bandlist_memory = pwdev->bandlist_memory; */
        prdev->buffer_memory = prdev->memory;

        /* enable renderer to accept changes to params computed by writer */
        prdev->space_params.params_are_read_only = false;

        /* page queue is common to both devices */
        prdev->page_queue = pwdev->page_queue;

        /* Start renderer thread & wait for its successful open of device */
        if (!(open_semaphore = gx_semaphore_alloc(prdev->memory)))
            code = gs_note_error(gs_error_VMerror);
        else {
            gdev_prn_start_render_params thread_params;

            thread_params.writer_device = pwdev;
            thread_params.open_semaphore = open_semaphore;
            thread_params.open_code = 0;
            code = (*pwdev->printer_procs.start_render_thread)
                (&thread_params);
            if (code >= 0)
                gx_semaphore_wait(open_semaphore);
            code = thread_params.open_code;
            gx_semaphore_free(open_semaphore);
        }
    }
    /* ----- Set the recovery procedure for the mem allocator ----- */
    if (code >= 0) {
        gs_memory_retrying_set_recover(
                (gs_memory_retrying_t *)pwdev->memory->non_gc_memory,
                prna_mem_recover,
                (void *)pcwdev
            );
    }
    /* --------------------- Wrap up --------------------------------- */
    /* --------------------------------------------------------------- */
    if (code < 0) {
open_err:
        /* error mop-up */
        if (render_memory && !prdev)
            free_render_memory(render_memory);

        gdev_prn_dealloc(pwdev);
        if (writer_is_open) {
            gdev_prn_close(pdev);
            pwdev->free_up_bandlist_memory = 0;
        }
    }
    return code;
}

/* This procedure is called from within the memory allocator when regular */
/* malloc's fail -- this procedure tries to free up pages from the queue  */
/* and returns a status code indicating whether any more can be freed.    */
static gs_memory_recover_status_t
prna_mem_recover(gs_memory_retrying_t *rmem, void *proc_data)
{
    int pages_remain = 0;
    gx_device_clist_writer *cldev = proc_data;

    if (cldev->free_up_bandlist_memory != NULL)
        pages_remain =
            (*cldev->free_up_bandlist_memory)( (gx_device *)cldev, false );
    return (pages_remain > 0) ? RECOVER_STATUS_RETRY_OK : RECOVER_STATUS_NO_RETRY;
}

/* (Re)set printer device fields which get trampled by gdevprn_open & put_params */
static void
reinit_printer_into_printera(
                             gx_device_printer * const pdev	/* printer to convert */
)
{
    /* Change some of the procedure vector to point at async procedures */
    /* Originals were already saved by gdev_prn_open */
    if (dev_proc(pdev, close_device) == gdev_prn_close)
        set_dev_proc(pdev, close_device, gdev_prn_async_write_close_device);
    set_dev_proc(pdev, output_page, gdev_prn_async_write_output_page);
    set_dev_proc(pdev, put_params, gdev_prn_async_write_put_params);
    set_dev_proc(pdev, get_xfont_procs, gx_default_get_xfont_procs);
    set_dev_proc(pdev, get_xfont_device, gx_default_get_xfont_device);
    set_dev_proc(pdev, get_hardware_params, gdev_prn_async_write_get_hardware_params);

    /* clist writer calls this if it runs out of memory & wants to retry */
    pdev->free_up_bandlist_memory = &gdev_prn_async_write_free_up_bandlist_memory;
}

/* Generic closing for the writer device. */
static int
gdev_prn_async_write_close_device(gx_device * pdev)
{
    gx_device_printer *const pwdev = (gx_device_printer *) pdev;
    gx_device_clist_writer *const pcwdev =
        &((gx_device_clist *) pdev)->writer;

    /* Signal render thread to close & terminate when done */
    gx_page_queue_add_page(pcwdev, pwdev->page_queue,
                           GX_PAGE_QUEUE_ACTION_TERMINATE, 0, 0);

    /* Wait for renderer to finish all pages & terminate req */
    gx_page_queue_wait_until_empty(pwdev->page_queue);

    /* Cascade down to original close rtn */
    gdev_prn_close(pdev);
    pwdev->free_up_bandlist_memory = 0;

    /* Deallocate dynamic stuff */
    gdev_prn_dealloc(pwdev);
    return 0;
}

/* Deallocate dynamic memory attached to device. Aware of possible incomplete open */
static void
gdev_prn_dealloc(gx_device_printer * pwdev)
{
    gx_device_printer *const prdev = pwdev->async_renderer;

    /* Delete renderer device & its memory allocator */
    if (prdev) {
        gs_memory_t *render_alloc = prdev->memory;

        gs_free_object(render_alloc, prdev, "gdev_prn_dealloc");
        free_render_memory(render_alloc);
    }
    /* Free page queue */
    if (pwdev->page_queue) {
        gx_page_queue_dnit(pwdev->page_queue);
        gs_free_object(pwdev->bandlist_memory, pwdev->page_queue,
                       "gdev_prn_dealloc");
        pwdev->page_queue = 0;
    }
    /* Free memory bandlist allocators */
    if (pwdev->bandlist_memory)
        free_bandlist_memory(pwdev->bandlist_memory);
}

/*
 * Open the render portion of a printer device in ASYNC (overlapped) mode.
 * This routine must always be called by the concrete device's
 * xx_open_render_device routine in lieu of gdev_prn_open.
 */
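/*
 * Illustrative sketch (hypothetical "xyz" names): a driver that supplies
 * its own printer_procs.open_render_device would call this routine there
 * rather than gdev_prn_open, e.g.
 *
 *	static int
 *	xyz_open_render_device(gx_device_printer *prdev)
 *	{
 *	    int code = gdev_prn_async_render_open(prdev);
 *
 *	    if (code >= 0) {
 *	        ... renderer-side hardware or output-file setup here ...
 *	    }
 *	    return code;
 *	}
 */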
int
gdev_prn_async_render_open(gx_device_printer * prdev)
{
    gx_device *const pdev = (gx_device *) prdev;

    prdev->is_async_renderer = true;
    return gdev_prn_open(pdev);
}

/* Generic closing for the rendering device. */
int
gdev_prn_async_render_close_device(gx_device_printer * prdev)
{
    gx_device *const pdev = (gx_device *) prdev;

    return gdev_prn_close(pdev);
}

/* (Re)set renderer device fields which get trampled by gdevprn_open & put_params */
static void
reinit_printer_into_renderer(
                             gx_device_printer * const pdev	/* printer to convert */
)
{
    set_dev_proc(pdev, put_params, gdev_prn_async_render_put_params);
}

/* ---------- Start rasterizer thread ------------ */
/*
 * Must be called by async device driver implementation (see gdevprna.h
 * under "Synchronizing the Instances"). This is the rendering loop, which
 * requires its own thread for as long as the device is open. This proc only
 * returns after the device is closed, or if the open failed. NB that an
 * open error leaves things in a state where the writer thread will not be
 * able to close since it's expecting the renderer to acknowledge its
 * requests before the writer can close.  Ergo, if this routine fails you'll
 * crash unless the caller fixes the problem & successfully retries this.
 */
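/*
 * Illustrative sketch (hypothetical "xyz" names; assumes a platform
 * thread-creation primitive such as gp_create_thread from gp.h is
 * available): the driver's start_render_thread procedure normally just
 * spawns a thread whose body is this routine, e.g.
 *
 *	static void
 *	xyz_render_thread_body(void *data)
 *	{
 *	    gdev_prn_async_render_thread((gdev_prn_start_render_params *)data);
 *	}
 *
 *	static int
 *	xyz_start_render_thread(gdev_prn_start_render_params *params)
 *	{
 *	    return gp_create_thread(xyz_render_thread_body, params);
 *	}
 */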
int				/* rets 0 ok, -ve error code if open failed */
gdev_prn_async_render_thread(
                             gdev_prn_start_render_params * params
)
{
    gx_device_printer *const pwdev = params->writer_device;
    gx_device_printer *const prdev = pwdev->async_renderer;
    gx_page_queue_entry_t *entry;
    int code;

    /* Open device, but don't use default if user didn't override */
    if (prdev->printer_procs.open_render_device ==
          gx_default_open_render_device)
        code = gdev_prn_async_render_open(prdev);
    else
        code = (*prdev->printer_procs.open_render_device) (prdev);
    reinit_printer_into_renderer(prdev);

    /* The cmd list logic assumes reader's & writer's tile caches are same size */
    if (code >= 0 &&
          ((gx_device_clist *) pwdev)->writer.page_tile_cache_size !=
            ((gx_device_clist *) prdev)->writer.page_tile_cache_size) {
        gdev_prn_async_render_close_device(prdev);
        code = gs_note_error(gs_error_VMerror);
    }
    params->open_code = code;
    gx_semaphore_signal(params->open_semaphore);
    if (code < 0)
        return code;

    /* fake open, since not called by gs_opendevice */
    prdev->is_open = true;

    /* Successful open */
    while ((entry = gx_page_queue_start_dequeue(prdev->page_queue))
           && entry->action != GX_PAGE_QUEUE_ACTION_TERMINATE) {
        /* Force printer open again if it mysteriously closed. */
        /* This shouldn't ever happen, but... */
        if (!prdev->is_open) {
            if (prdev->printer_procs.open_render_device ==
                  gx_default_open_render_device)
                code = gdev_prn_async_render_open(prdev);
            else
                code = (*prdev->printer_procs.open_render_device) (prdev);
            reinit_printer_into_renderer(prdev);

            if (code >= 0) {
                prdev->is_open = true;
                gdev_prn_output_page((gx_device *) prdev, 0, true);
            }
        }
        if (prdev->is_open) {
            /* Force retrieved entry onto render device */
            ((gx_device_clist *) prdev)->common.page_info = entry->page_info;

            /* Set up device geometry */
            if (clist_setup_params((gx_device *) prdev) >= 0)
                /* Do this again, since setup_params may have trashed it */
                ((gx_device_clist *) prdev)->common.page_info = entry->page_info;

            /* Call appropriate renderer routine to deal w/buffer */
            /* Ignore status, since we don't know how to deal w/errors! */
            switch (entry->action) {

                case GX_PAGE_QUEUE_ACTION_FULL_PAGE:
                    (*dev_proc(prdev, output_page))((gx_device *) prdev,
                                                    entry->num_copies, true);
                    break;

                case GX_PAGE_QUEUE_ACTION_PARTIAL_PAGE:
                case GX_PAGE_QUEUE_ACTION_COPY_PAGE:
                    (*dev_proc(prdev, output_page))((gx_device *) prdev,
                                                    entry->num_copies, false);
                    break;
            }
            /*
             * gx_page_queue_finish_dequeue will close and free the band
             * list files, so we don't need to call clist_close_output_file.
             */
        }
        /* Finalize dequeue & free retrieved queue entry */
        gx_page_queue_finish_dequeue(entry);
    }

    /* Close device, but don't use default if user hasn't overridden. */
    /* Ignore status, since returning bad status means open failed */
    if (prdev->printer_procs.close_render_device ==
          gx_default_close_render_device)
        gdev_prn_async_render_close_device(prdev);
    else
        (*prdev->printer_procs.close_render_device)(prdev);

    /* undo fake open, since not called by gs_closedevice */
    prdev->is_open = false;

    /* Now that device is closed, acknowledge gx_page_queue_terminate */
    gx_page_queue_finish_dequeue(entry);

    return 0;
}

/* ------ Get/put parameters ------ */

/* Put parameters. */
static int
gdev_prn_async_write_put_params(gx_device * pdev, gs_param_list * plist)
{
    gx_device_clist_writer *const pclwdev =
        &((gx_device_clist *) pdev)->writer;
    gx_device_printer *const pwdev = (gx_device_printer *) pdev;
    gdev_prn_space_params save_sp = pwdev->space_params;
    int save_height = pwdev->height;
    int save_width = pwdev->width;
    int code, ecode;

    if (!pwdev->is_open)
        return (*pwdev->orig_procs.put_params) (pdev, plist);

    /*
     * First, cascade to real device's put_params.
     * If that put_params made any changes that require re-opening
     * the device, just flush the page; the parameter block at the
     * head of the next page will reflect the changes just made.
     * If the put_params requires no re-open, just slip it into the
     * stream in the command buffer. This way, the
     * writer device should parallel the renderer status at the same point
     * in their respective executions.
     *
     * NB. that all this works only because we take the position that
     * put_params can make no change that actually affects hardware's state
     * before the final output_page on the RASTERIZER.
     */
    /* Call original procedure, but "closed" to prevent closing device */
    pwdev->is_open = false;	/* prevent put_params from closing device */
    code = (*pwdev->orig_procs.put_params) (pdev, plist);
    pwdev->is_open = true;
    pwdev->OpenOutputFile = 0;	/* This is wrong, because it causes the same thing in the renderer */

    /* Flush device or emit to command list, depending if device changed geometry */
    if (memcmp(&pwdev->space_params, &save_sp, sizeof(save_sp)) != 0 ||
        pwdev->width != save_width || pwdev->height != save_height
        ) {
        int pageq_remaining;
        int new_width = pwdev->width;
        int new_height = pwdev->height;
        gdev_prn_space_params new_sp = pwdev->space_params;

        /* Need to start a new page, reallocate clist memory */
        pwdev->width = save_width;
        pwdev->height = save_height;
        pwdev->space_params = save_sp;

        /* First, get rid of any pending partial pages */
        code = flush_page(pwdev, false);

        /* Free and reallocate the printer memory. */
        pageq_remaining = 1;	/* assume there are pages left in queue */
        do {
            ecode =
                gdev_prn_reallocate_memory(pdev,
                                           &new_sp, new_width, new_height);
            if (ecode >= 0)
                break;		/* managed to recover enough memory */
            if (!pdev->is_open) {
                /* Disaster! Device was forced closed, which async drivers */
                /* aren't supposed to do. */
                gdev_prn_async_write_close_device(pdev);
                return ecode;	/* caller is supposed to know device could be closed now */
            }
            pclwdev->error_is_retryable = (ecode == gs_error_VMerror);
        }
        while (pageq_remaining >= 1 &&
               (pageq_remaining = ecode =
                clist_VMerror_recover(pclwdev, ecode)) >= 0);
        if (ecode < 0) {
            gdev_prn_free_memory(pdev);
            pclwdev->is_open = false;
            code = ecode;
        }
    } else if (code >= 0) {
        do
            if ((ecode = cmd_put_params(pclwdev, plist)) >= 0)
                break;
        while ((ecode = clist_VMerror_recover(pclwdev, ecode)) >= 0);
        if (ecode < 0 && pclwdev->error_is_retryable &&
            pclwdev->driver_call_nesting == 0
            )
            ecode = clist_VMerror_recover_flush(pclwdev, ecode);
        if (ecode < 0)
            code = ecode;
    }
    /* Reset fields that got trashed by gdev_prn_put_params and/or gdev_prn_open */
    reinit_printer_into_printera(pwdev);

    return code;
}

/* Get hardware-detected params. Drain page queue, then call renderer version */
static int
gdev_prn_async_write_get_hardware_params(gx_device * pdev, gs_param_list * plist)
{
    gx_device_printer *const pwdev = (gx_device_printer *) pdev;
    gx_device_printer *const prdev = pwdev->async_renderer;

    if (!pwdev->is_open || !prdev)
        /* if not open, just use device's get hw params */
        return (dev_proc(pwdev, get_hardware_params))(pdev, plist);
    else {
        /* wait for empty pipeline */
        gx_page_queue_wait_until_empty(pwdev->page_queue);

        /* get reader's h/w params, now that writer & reader are sync'ed */
        return (dev_proc(prdev, get_hardware_params))
            ((gx_device *) prdev, plist);
    }
}

/* Put parameters on RENDERER. */
static int		/* returns -ve err code only if FATAL error (can't keep rendering) */
gdev_prn_async_render_put_params(gx_device * pdev, gs_param_list * plist)
{
    gx_device_printer *const prdev = (gx_device_printer *) pdev;
    bool save_is_open = prdev->is_open;

    /* put_params from clist are guaranteed to never re-init device */
    /* They're also pretty much guaranteed to always succeed */
    (*prdev->orig_procs.put_params) (pdev, plist);

    /* If device closed itself, try to open & clear it */
    if (!prdev->is_open && save_is_open) {
        int code;

        if (prdev->printer_procs.open_render_device ==
              gx_default_open_render_device)
            code = gdev_prn_async_render_open(prdev);
        else
            code = (*prdev->printer_procs.open_render_device) (prdev);
        reinit_printer_into_renderer(prdev);
        if (code >= 0)
            /****** CLEAR PAGE SOMEHOW ******/;
        else
            return code;	/* this'll cause clist to stop processing this band! */
    }
    return 0;			/* return this unless FATAL status */
}

/* ------ Others ------ */

/* Output page causes file to get added to page queue for later rasterizing */
static int
gdev_prn_async_write_output_page(gx_device * pdev, int num_copies, int flush)
{
    gx_device_printer *const pwdev = (gx_device_printer *) pdev;
    gx_device_clist_writer *const pcwdev =
        &((gx_device_clist *) pdev)->writer;
    int flush_code;
    int add_code;
    int open_code;
    int one_last_time = 1;

    /* do NOT close files before sending to page queue */
    flush_code = clist_end_page(pcwdev);
    add_code = gx_page_queue_add_page(pcwdev, pwdev->page_queue,
                                (flush ? GX_PAGE_QUEUE_ACTION_FULL_PAGE :
                                 GX_PAGE_QUEUE_ACTION_COPY_PAGE),
                                      &pcwdev->page_info, num_copies);
    if (flush && (flush_code >= 0) && (add_code >= 0)) {
        /* This page is finished */
        gx_finish_output_page(pdev, num_copies, flush);
    }

    /* Open new band files to take the place of ones added to page queue */
    while ((open_code = (*gs_clist_device_procs.open_device)
            ((gx_device *) pdev)) == gs_error_VMerror) {
        /* Open failed, try after a page gets rendered */
        if (!gx_page_queue_wait_one_page(pwdev->page_queue)
            && one_last_time-- <= 0)
            break;
    }

    return
        (flush_code < 0 ? flush_code : open_code < 0 ? open_code :
         add_code < 0 ? add_code : 0);
}

/* Free bandlist memory waits until the rasterizer runs enough to free some mem */
static int			/* -ve err,  0 if no pages remain to rasterize, 1 if more pages to go */
gdev_prn_async_write_free_up_bandlist_memory(gx_device * pdev, bool flush_current)
{
    gx_device_printer *const pwdev = (gx_device_printer *) pdev;

    if (flush_current) {
        int code = flush_page(pwdev, true);

        if (code < 0)
            return code;
    }
    return gx_page_queue_wait_one_page(pwdev->page_queue);
}

/* -------- Utility Routines --------- */

/* Flush out any partial pages accumulated in device */
/* LEAVE DEVICE in a state where it must be re-opened/re-init'd */
static int			/* ret 0 ok no flush, -ve error code */
flush_page(
           gx_device_printer * pwdev,	/* async writer device to flush */
           bool partial	/* true if only partial page */
)
{
    gx_device_clist *const pcldev = (gx_device_clist *) pwdev;
    gx_device_clist_writer *const pcwdev = &pcldev->writer;
    int flush_code = 0;
    int add_code = 0;

    /* do NOT close files before sending to page queue */
    flush_code = clist_end_page(pcwdev);
    add_code = gx_page_queue_add_page(pcwdev, pwdev->page_queue,
                                (partial ? GX_PAGE_QUEUE_ACTION_PARTIAL_PAGE :
                                 GX_PAGE_QUEUE_ACTION_FULL_PAGE),
                                      &pcwdev->page_info, 0);

    /* Device no longer has BANDFILES, so it must be re-init'd by caller */
    pcwdev->page_info.bfile = pcwdev->page_info.cfile = 0;

    /* return the worst of the status. */
    if (flush_code < 0)
        return flush_code;
    return add_code;
}

/* Flush any pending partial pages, re-open device */
static int
reopen_clist_after_flush(
                         gx_device_printer * pwdev	/* async writer device to flush */
)
{
    int open_code;
    int one_last_time = 1;

    /* Open new band files to take the place of ones added to page queue */
    while ((open_code = (*gs_clist_device_procs.open_device)
            ((gx_device *) pwdev)) == gs_error_VMerror) {
        /* Open failed, try after a page gets rendered */
        if (!gx_page_queue_wait_one_page(pwdev->page_queue)
            && one_last_time-- <= 0)
            break;
    }
    return open_code;
}

/*
 * The bandlist does allocations on the writer's thread & deallocations on
 * the reader's thread, so it needs to have mutual exclusion from itself, as
 * well as from other memory allocators since the reader can run at the same
 * time as the interpreter.  The bandlist allocator therefore consists of
 * a monitor-locking wrapper around either a direct heap allocator or (for
 * testing) a fixed-limit allocator.
 */

/* Create a bandlist allocator. */
static int
alloc_bandlist_memory(gs_memory_t ** final_allocator,
                      gs_memory_t * base_allocator)
{
    gs_memory_t *data_allocator = 0;
    gs_memory_locked_t *locked_allocator = 0;
    int code = 0;

#if defined(DEBUG) && defined(DebugBandlistMemorySize)
    code = alloc_render_memory(&data_allocator, base_allocator,
                               DebugBandlistMemorySize);
    if (code < 0)
        return code;
#else
    data_allocator = (gs_memory_t *)gs_malloc_memory_init();
    if (!data_allocator)
        return_error(gs_error_VMerror);
#endif
    locked_allocator = (gs_memory_locked_t *)
        gs_alloc_bytes_immovable(data_allocator, sizeof(gs_memory_locked_t),
                                 "alloc_bandlist_memory(locked allocator)");
    if (!locked_allocator)
        goto alloc_err;
    code = gs_memory_locked_init(locked_allocator, data_allocator);
    if (code < 0)
        goto alloc_err;
    *final_allocator = (gs_memory_t *)locked_allocator;
    return 0;
alloc_err:
    if (locked_allocator)
        free_bandlist_memory((gs_memory_t *)locked_allocator);
    else if (data_allocator)
        gs_memory_free_all(data_allocator, FREE_ALL_EVERYTHING,
                           "alloc_bandlist_memory(data allocator)");
    return (code < 0 ? code : gs_note_error(gs_error_VMerror));
}

/* Free a bandlist allocator. */
static void
free_bandlist_memory(gs_memory_t *bandlist_allocator)
{
    gs_memory_locked_t *const lmem = (gs_memory_locked_t *)bandlist_allocator;
    gs_memory_t *data_mem = gs_memory_locked_target(lmem);

    gs_memory_free_all(bandlist_allocator,
                       FREE_ALL_STRUCTURES | FREE_ALL_ALLOCATOR,
                       "free_bandlist_memory(locked allocator)");
    if (data_mem)
        gs_memory_free_all(data_mem, FREE_ALL_EVERYTHING,
                           "free_bandlist_memory(data allocator)");
}

/* Create an allocator with a fixed memory limit. */
static int
alloc_render_memory(gs_memory_t **final_allocator,
                    gs_memory_t *base_allocator, long space)
{
    gs_ref_memory_t *rmem =
        ialloc_alloc_state((gs_memory_t *)base_allocator, space);
    vm_spaces spaces;
    int i, code;

    if (rmem == 0)
        return_error(gs_error_VMerror);
    code = ialloc_add_chunk(rmem, space, "alloc_render_memory");
    if (code < 0) {
        gs_memory_free_all((gs_memory_t *)rmem, FREE_ALL_EVERYTHING,
                           "alloc_render_memory");
        return code;
    }
    *final_allocator = (gs_memory_t *)rmem;

    /* Call the reclaim procedure to delete the string marking tables	*/
    /* Only need this once since no other chunks will ever exist	*/

    for ( i = 0; i < countof(spaces_indexed); ++i )
        spaces_indexed[i] = 0;
    space_local = space_global = (gs_ref_memory_t *)rmem;
    spaces.vm_reclaim = gs_nogc_reclaim;	/* no real GC on this chunk */
    GS_RECLAIM(&spaces, false);

    return 0;
}

/* Free an allocator with a fixed memory limit. */
static void
free_render_memory(gs_memory_t *render_allocator)
{
    if (render_allocator)
        gs_memory_free_all(render_allocator, FREE_ALL_EVERYTHING,
                           "free_render_memory");
}