1 /* Copyright (C) 2001-2006 Artifex Software, Inc.
2 All Rights Reserved.
3
4 This software is provided AS-IS with no warranty, either express or
5 implied.
6
7 This software is distributed under license and may not be copied, modified
8 or distributed except as expressly authorized under the terms of that
9 license. Refer to licensing information at http://www.artifex.com/
10 or contact Artifex Software, Inc., 7 Mt. Lassen Drive - Suite A-134,
11 San Rafael, CA 94903, U.S.A., +1(415)492-9861, for further information.
12 */
13 /* $Id: gdevmem.c 10620 2010-01-19 15:48:57Z robin $ */
14 /* Generic "memory" (stored bitmap) device */
15 #include "memory_.h"
16 #include "gx.h"
17 #include "gsdevice.h"
18 #include "gserrors.h"
19 #include "gsrect.h"
20 #include "gsstruct.h"
21 #include "gxarith.h"
22 #include "gxdevice.h"
23 #include "gxgetbit.h"
24 #include "gxdevmem.h" /* semi-public definitions */
25 #include "gdevmem.h" /* private definitions */
26 #include "gstrans.h"
27
28 /* Structure descriptor */
29 public_st_device_memory();
30
/* GC procedures */
static
ENUM_PTRS_WITH(device_memory_enum_ptrs, gx_device_memory *mptr)
{
    /* Indices beyond our 3 local pointers are handled by the
       forwarding-device superclass descriptor. */
    return ENUM_USING(st_device_forward, vptr, sizeof(gx_device_forward), index - 3);
}
/* Enumerate the bitmap data (0) and scan-line pointer table (1) only if
   they are GC-managed, i.e. not marked as foreign allocations. */
case 0: ENUM_RETURN((mptr->foreign_bits ? NULL : (void *)mptr->base));
case 1: ENUM_RETURN((mptr->foreign_line_pointers ? NULL : (void *)mptr->line_ptrs));
ENUM_STRING_PTR(2, gx_device_memory, palette);
ENUM_PTRS_END
static
RELOC_PTRS_WITH(device_memory_reloc_ptrs, gx_device_memory *mptr)
{
    if (!mptr->foreign_bits) {
        byte *base_old = mptr->base;
        long reloc;
        int y;

        /* Relocate the bitmap base, then shift every scan-line pointer
           by the same distance the base moved. */
        RELOC_PTR(gx_device_memory, base);
        reloc = base_old - mptr->base;
        for (y = 0; y < mptr->height; y++)
            mptr->line_ptrs[y] -= reloc;
        /* Relocate line_ptrs, which also points into the data area. */
        mptr->line_ptrs = (byte **) ((byte *) mptr->line_ptrs - reloc);
    } else if (!mptr->foreign_line_pointers) {
        /* Bits are foreign, but the pointer table is GC-managed. */
        RELOC_PTR(gx_device_memory, line_ptrs);
    }
    RELOC_CONST_STRING_PTR(gx_device_memory, palette);
    RELOC_USING(st_device_forward, vptr, sizeof(gx_device_forward));
}
RELOC_PTRS_END
62
/* Define the palettes for monobit devices. */
/* Each palette is two packed R,G,B byte triples: entry 0 then entry 1. */
static const byte b_w_palette_string[6] = {
    0xff, 0xff, 0xff, 0, 0, 0
};
/* Pixel 0 = white, pixel 1 = black (the monobit default; see below). */
const gs_const_string mem_mono_b_w_palette = {
    b_w_palette_string, 6
};
static const byte w_b_palette_string[6] = {
    0, 0, 0, 0xff, 0xff, 0xff
};
/* Pixel 0 = black, pixel 1 = white (the inverse polarity). */
const gs_const_string mem_mono_w_b_palette = {
    w_b_palette_string, 6
};
76
/* ------ Generic code ------ */

/* Return the appropriate memory device for a given */
/* number of bits per pixel (0 if none suitable). */
/* Indexed by depth; only 1, 2, 4, 8 and multiples of 8 up to 64 exist. */
static const gx_device_memory *const mem_devices[65] = {
    0, &mem_mono_device, &mem_mapped2_device, 0, &mem_mapped4_device,
    0, 0, 0, &mem_mapped8_device,
    0, 0, 0, 0, 0, 0, 0, &mem_true16_device,
    0, 0, 0, 0, 0, 0, 0, &mem_true24_device,
    0, 0, 0, 0, 0, 0, 0, &mem_true32_device,
    0, 0, 0, 0, 0, 0, 0, &mem_true40_device,
    0, 0, 0, 0, 0, 0, 0, &mem_true48_device,
    0, 0, 0, 0, 0, 0, 0, &mem_true56_device,
    0, 0, 0, 0, 0, 0, 0, &mem_true64_device
};
92 const gx_device_memory *
gdev_mem_device_for_bits(int bits_per_pixel)93 gdev_mem_device_for_bits(int bits_per_pixel)
94 {
95 return ((uint)bits_per_pixel > 64 ? (const gx_device_memory *)0 :
96 mem_devices[bits_per_pixel]);
97 }
/* Do the same for a word-oriented device. */
/* Same layout as mem_devices, but for word-padded (big-endian word) bitmaps. */
static const gx_device_memory *const mem_word_devices[65] = {
    0, &mem_mono_device, &mem_mapped2_word_device, 0, &mem_mapped4_word_device,
    0, 0, 0, &mem_mapped8_word_device,
    0, 0, 0, 0, 0, 0, 0, 0 /*no 16-bit word device*/,
    0, 0, 0, 0, 0, 0, 0, &mem_true24_word_device,
    0, 0, 0, 0, 0, 0, 0, &mem_true32_word_device,
    0, 0, 0, 0, 0, 0, 0, &mem_true40_word_device,
    0, 0, 0, 0, 0, 0, 0, &mem_true48_word_device,
    0, 0, 0, 0, 0, 0, 0, &mem_true56_word_device,
    0, 0, 0, 0, 0, 0, 0, &mem_true64_word_device
};
110 const gx_device_memory *
gdev_mem_word_device_for_bits(int bits_per_pixel)111 gdev_mem_word_device_for_bits(int bits_per_pixel)
112 {
113 return ((uint)bits_per_pixel > 64 ? (const gx_device_memory *)0 :
114 mem_word_devices[bits_per_pixel]);
115 }
116
117 /* Test whether a device is a memory device */
118 bool
gs_device_is_memory(const gx_device * dev)119 gs_device_is_memory(const gx_device * dev)
120 {
121 /*
122 * We use the draw_thin_line procedure to mark memory devices.
123 * See gdevmem.h.
124 */
125 int bits_per_pixel = dev->color_info.depth;
126 const gx_device_memory *mdproto;
127
128 if ((uint)bits_per_pixel > 64)
129 return false;
130 mdproto = mem_devices[bits_per_pixel];
131 if (mdproto != 0 && dev_proc(dev, draw_thin_line) == dev_proc(mdproto, draw_thin_line))
132 return true;
133 mdproto = mem_word_devices[bits_per_pixel];
134 return (mdproto != 0 && dev_proc(dev, draw_thin_line) == dev_proc(mdproto, draw_thin_line));
135 }
136
/* Make a memory device. */
/* Note that the default for monobit devices is white = 0, black = 1. */
/*
 * dev         - device struct to initialize in place
 * mdproto     - depth-specific prototype (from mem_devices et al.)
 * page_device - -1: never a page device; 1: always; 0: inherit
 * target      - if non-NULL, color mapping is forwarded to this device
 */
void
gs_make_mem_device(gx_device_memory * dev, const gx_device_memory * mdproto,
                   gs_memory_t * mem, int page_device, gx_device * target)
{
    gx_device_init((gx_device *) dev, (const gx_device *)mdproto,
                   mem, true);
    dev->stype = &st_device_memory;
    switch (page_device) {
        case -1:
            set_dev_proc(dev, get_page_device, gx_default_get_page_device);
            break;
        case 1:
            set_dev_proc(dev, get_page_device, gx_page_device_get_page_device);
            break;
    }
    /* Preload the black and white cache. */
    if (target == 0) {
        if (dev->color_info.depth == 1) {
            /* The default for black-and-white devices is inverted. */
            dev->cached_colors.black = 1;
            dev->cached_colors.white = 0;
        } else {
            dev->cached_colors.black = 0;
            dev->cached_colors.white = (1 << dev->color_info.depth) - 1;
        }
    } else {
        gx_device_set_target((gx_device_forward *)dev, target);
        /* Forward the color mapping operations to the target. */
        gx_device_forward_color_procs((gx_device_forward *) dev);
        gx_device_copy_color_procs((gx_device *)dev, target);
        dev->cached_colors = target->cached_colors;
    }
    if (dev->color_info.depth == 1) {
        gx_color_value cv[3];

        /* Pick the monobit palette polarity: inverted (black = 1) unless
           the target maps RGB black to pixel value 0. */
        cv[0] = cv[1] = cv[2] = 0;
        gdev_mem_mono_set_inverted(dev, (target == 0 ||
                                         (*dev_proc(dev, map_rgb_color))((gx_device *)dev, cv) != 0));
    }
    check_device_separable((gx_device *)dev);
    gx_device_fill_in_procs((gx_device *)dev);
    dev->band_y = 0;
}
182
/* Make a memory device using copydevice, this should replace gs_make_mem_device. */
/* Note that the default for monobit devices is white = 0, black = 1. */
/*
 * On success stores the new device in *ppdev and returns 0; returns a
 * negative error code (or -1 when mem is NULL) on failure.  Parameters
 * have the same meaning as in gs_make_mem_device.
 */
int
gs_make_mem_device_with_copydevice(gx_device_memory ** ppdev,
                                   const gx_device_memory * mdproto,
                                   gs_memory_t * mem,
                                   int page_device,
                                   gx_device * target)
{
    int code;
    gx_device_memory *pdev;

    if (mem == 0)
        return -1;

    /* Clone the prototype rather than initializing in place. */
    code = gs_copydevice((gx_device **)&pdev,
                         (const gx_device *)mdproto,
                         mem);
    if (code < 0)
        return code;

    switch (page_device) {
        case -1:
            set_dev_proc(pdev, get_page_device, gx_default_get_page_device);
            break;
        case 1:
            set_dev_proc(pdev, get_page_device, gx_page_device_get_page_device);
            break;
    }
    /* Preload the black and white cache. */
    if (target == 0) {
        if (pdev->color_info.depth == 1) {
            /* The default for black-and-white devices is inverted. */
            pdev->cached_colors.black = 1;
            pdev->cached_colors.white = 0;
        } else {
            pdev->cached_colors.black = 0;
            pdev->cached_colors.white = (1 << pdev->color_info.depth) - 1;
        }
    } else {
        gx_device_set_target((gx_device_forward *)pdev, target);
        /* Forward the color mapping operations to the target. */
        gx_device_forward_color_procs((gx_device_forward *) pdev);
        gx_device_copy_color_procs((gx_device *)pdev, target);
        pdev->cached_colors = target->cached_colors;
    }
    if (pdev->color_info.depth == 1) {
        gx_color_value cv[3];

        /* Pick the monobit palette polarity: inverted (black = 1) unless
           the target maps RGB black to pixel value 0. */
        cv[0] = cv[1] = cv[2] = 0;
        gdev_mem_mono_set_inverted(pdev, (target == 0 ||
                                          (*dev_proc(pdev, map_rgb_color))((gx_device *)pdev, cv) != 0));
    }
    check_device_separable((gx_device *)pdev);
    gx_device_fill_in_procs((gx_device *)pdev);
    pdev->band_y = 0;
    *ppdev = pdev;
    return 0;
}
242
243
/* Make a monobit memory device using copydevice */
/* Never a page device; forwards to target; black = 1. */
/* Stores the new device in *ppdev; returns 0 or a negative error code. */
int
gs_make_mem_mono_device_with_copydevice(gx_device_memory ** ppdev, gs_memory_t * mem,
                                        gx_device * target)
{
    int code;
    gx_device_memory *pdev;

    if (mem == 0)
        return -1;

    /* Clone the monobit prototype. */
    code = gs_copydevice((gx_device **)&pdev,
                         (const gx_device *)&mem_mono_device,
                         mem);
    if (code < 0)
        return code;

    set_dev_proc(pdev, get_page_device, gx_default_get_page_device);
    gx_device_set_target((gx_device_forward *)pdev, target);
    gdev_mem_mono_set_inverted(pdev, true);
    check_device_separable((gx_device *)pdev);
    gx_device_fill_in_procs((gx_device *)pdev);
    *ppdev = pdev;
    return 0;
}
269
270
/* Make a monobit memory device. This is never a page device. */
/* Note that white=0, black=1. */
void
gs_make_mem_mono_device(gx_device_memory * dev, gs_memory_t * mem,
                        gx_device * target)
{
    gx_device_init((gx_device *)dev, (const gx_device *)&mem_mono_device,
                   mem, true);
    set_dev_proc(dev, get_page_device, gx_default_get_page_device);
    /* Forward to the target and select the inverted (black = 1) palette. */
    gx_device_set_target((gx_device_forward *)dev, target);
    gdev_mem_mono_set_inverted(dev, true);
    check_device_separable((gx_device *)dev);
    gx_device_fill_in_procs((gx_device *)dev);
}
285
286 /* Define whether a monobit memory device is inverted (black=1). */
287 void
gdev_mem_mono_set_inverted(gx_device_memory * dev,bool black_is_1)288 gdev_mem_mono_set_inverted(gx_device_memory * dev, bool black_is_1)
289 {
290 if (black_is_1)
291 dev->palette = mem_mono_b_w_palette;
292 else
293 dev->palette = mem_mono_w_b_palette;
294 }
295
/*
 * Compute the size of the bitmap storage, including the space for the scan
 * line pointer table.  Note that scan lines are padded to a multiple of
 * align_bitmap_mod bytes, and additional padding may be needed if the
 * pointer table must be aligned to an even larger modulus.
 *
 * The computation for planar devices is a little messier.  Each plane
 * must pad its scan lines, and then we must pad again for the pointer
 * tables (one table per plane).
 *
 * Return VMerror if the size exceeds max ulong
 */
int
gdev_mem_bits_size(const gx_device_memory * dev, int width, int height, ulong *psize)
{
    int num_planes = dev->num_planes;
    gx_render_plane_t plane1;
    const gx_render_plane_t *planes;
    ulong size;
    int pi;

    /* A chunky device is treated as one plane of the full depth. */
    if (num_planes)
        planes = dev->planes;
    else
        planes = &plane1, plane1.depth = dev->color_info.depth, num_planes = 1;
    /* Sum the aligned raster of one scan line over all planes. */
    for (size = 0, pi = 0; pi < num_planes; ++pi)
        size += bitmap_raster(width * planes[pi].depth);
    /* Guard the size * height multiplication below against ulong overflow
       (including the alignment padding added by ROUND_UP). */
    if (height != 0)
        if (size > (max_ulong - ARCH_ALIGN_PTR_MOD) / (ulong)height)
            return_error(gs_error_VMerror);
    /* Pad so a pointer table can follow the bits, pointer-aligned. */
    *psize = ROUND_UP(size * height, ARCH_ALIGN_PTR_MOD);
    return 0;
}
329 ulong
gdev_mem_line_ptrs_size(const gx_device_memory * dev,int width,int height)330 gdev_mem_line_ptrs_size(const gx_device_memory * dev, int width, int height)
331 {
332 return (ulong)height * sizeof(byte *) * max(dev->num_planes, 1);
333 }
334 int
gdev_mem_data_size(const gx_device_memory * dev,int width,int height,ulong * psize)335 gdev_mem_data_size(const gx_device_memory * dev, int width, int height, ulong *psize)
336 {
337 ulong bits_size;
338 ulong line_ptrs_size = gdev_mem_line_ptrs_size(dev, width, height);
339
340 if (gdev_mem_bits_size(dev, width, height, &bits_size) < 0 ||
341 bits_size > max_ulong - line_ptrs_size)
342 return_error(gs_error_VMerror);
343 *psize = bits_size + line_ptrs_size;
344 return 0;
345 }
346 /*
347 * Do the inverse computation: given a width (in pixels) and a buffer size,
348 * compute the maximum height.
349 */
350 int
gdev_mem_max_height(const gx_device_memory * dev,int width,ulong size,bool page_uses_transparency)351 gdev_mem_max_height(const gx_device_memory * dev, int width, ulong size,
352 bool page_uses_transparency)
353 {
354 int height;
355 ulong max_height;
356 ulong data_size;
357
358 if (page_uses_transparency) {
359 /*
360 * If the device is using PDF 1.4 transparency then we will need to
361 * also allocate image buffers for doing the blending operations.
362 * We can only estimate the space requirements. However since it
363 * is only an estimate, we may exceed our desired buffer space while
364 * processing the file.
365 */
366 max_height = size / (bitmap_raster(width
367 * dev->color_info.depth + ESTIMATED_PDF14_ROW_SPACE(width))
368 + sizeof(byte *) * max(dev->num_planes, 1));
369 height = (int)min(max_height, max_int);
370 } else {
371 /* For non PDF 1.4 transparency, we can do an exact calculation */
372 max_height = size /
373 (bitmap_raster(width * dev->color_info.depth) +
374 sizeof(byte *) * max(dev->num_planes, 1));
375 height = (int)min(max_height, max_int);
376 /*
377 * Because of alignment rounding, the just-computed height might
378 * be too large by a small amount. Adjust it the easy way.
379 */
380 do {
381 gdev_mem_data_size(dev, width, height, &data_size);
382 if (data_size <= size)
383 break;
384 --height;
385 } while (data_size > size);
386 }
387 return height;
388 }
389
390 /* Open a memory device, allocating the data area if appropriate, */
391 /* and create the scan line table. */
392 int
mem_open(gx_device * dev)393 mem_open(gx_device * dev)
394 {
395 gx_device_memory *const mdev = (gx_device_memory *)dev;
396
397 /* Check that we aren't trying to open a planar device as chunky. */
398 if (mdev->num_planes)
399 return_error(gs_error_rangecheck);
400 return gdev_mem_open_scan_lines(mdev, dev->height);
401 }
/*
 * Allocate the bitmap data and/or line-pointer table as configured, and
 * fill in the first setup_height scan-line pointers.  Which pieces are
 * allocated here depends on bitmap_memory / line_pointer_memory:
 * if neither is set, both base and line_ptrs are assumed foreign.
 */
int
gdev_mem_open_scan_lines(gx_device_memory *mdev, int setup_height)
{
    bool line_pointers_adjacent = true;
    ulong size;

    if (setup_height < 0 || setup_height > mdev->height)
        return_error(gs_error_rangecheck);
    if (mdev->bitmap_memory != 0) {
        /* Allocate the data now. */
        if (gdev_mem_bitmap_size(mdev, &size) < 0)
            return_error(gs_error_VMerror);

        /* gs_alloc_bytes takes a uint; reject sizes that do not fit. */
        if ((uint) size != size)        /* ulong may be bigger than uint */
            return_error(gs_error_limitcheck);
        mdev->base = gs_alloc_bytes(mdev->bitmap_memory, (uint)size,
                                    "mem_open");
        if (mdev->base == 0)
            return_error(gs_error_VMerror);
        mdev->foreign_bits = false;
    } else if (mdev->line_pointer_memory != 0) {
        /* Allocate the line pointers now. */

        mdev->line_ptrs = (byte **)
            gs_alloc_byte_array(mdev->line_pointer_memory, mdev->height,
                                sizeof(byte *) * max(mdev->num_planes, 1),
                                "gdev_mem_open_scan_lines");
        if (mdev->line_ptrs == 0)
            return_error(gs_error_VMerror);
        mdev->foreign_line_pointers = false;
        /* Table is a separate allocation, not appended to the bits. */
        line_pointers_adjacent = false;
    }
    if (line_pointers_adjacent) {
        /* The pointer table lives immediately after the (aligned) bits. */
        gdev_mem_bits_size(mdev, mdev->width, mdev->height, &size);
        mdev->line_ptrs = (byte **)(mdev->base + size);
    }
    mdev->raster = gdev_mem_raster(mdev);
    /* Populate the scan-line pointer table from the current base/raster. */
    return gdev_mem_set_line_ptrs(mdev, NULL, 0, NULL, setup_height);
}
441 /*
442 * Set up the scan line pointers of a memory device.
443 * See gxdevmem.h for the detailed specification.
444 * Sets or uses line_ptrs, base, raster; uses width, color_info.depth,
445 * num_planes, plane_depths, plane_depth.
446 */
447 int
gdev_mem_set_line_ptrs(gx_device_memory * mdev,byte * base,int raster,byte ** line_ptrs,int setup_height)448 gdev_mem_set_line_ptrs(gx_device_memory * mdev, byte * base, int raster,
449 byte **line_ptrs, int setup_height)
450 {
451 int num_planes = mdev->num_planes;
452 gx_render_plane_t plane1;
453 const gx_render_plane_t *planes;
454 byte **pline =
455 (line_ptrs ? (mdev->line_ptrs = line_ptrs) : mdev->line_ptrs);
456 byte *data =
457 (base ? (mdev->raster = raster, mdev->base = base) :
458 (raster = mdev->raster, mdev->base));
459 int pi;
460
461 if (num_planes) {
462 if (base && !mdev->plane_depth)
463 return_error(gs_error_rangecheck);
464 planes = mdev->planes;
465 } else {
466 planes = &plane1;
467 plane1.depth = mdev->color_info.depth;
468 num_planes = 1;
469 }
470
471 for (pi = 0; pi < num_planes; ++pi) {
472 int raster = bitmap_raster(mdev->width * planes[pi].depth);
473 byte **pptr = pline;
474 byte **pend = pptr + setup_height;
475 byte *scan_line = data;
476
477 while (pptr < pend) {
478 *pptr++ = scan_line;
479 scan_line += raster;
480 }
481 data += raster * mdev->height;
482 pline += setup_height; /* not mdev->height, see gxdevmem.h */
483 }
484
485 return 0;
486 }
487
488 /* Return the initial transformation matrix */
489 void
mem_get_initial_matrix(gx_device * dev,gs_matrix * pmat)490 mem_get_initial_matrix(gx_device * dev, gs_matrix * pmat)
491 {
492 gx_device_memory * const mdev = (gx_device_memory *)dev;
493
494 pmat->xx = mdev->initial_matrix.xx;
495 pmat->xy = mdev->initial_matrix.xy;
496 pmat->yx = mdev->initial_matrix.yx;
497 pmat->yy = mdev->initial_matrix.yy;
498 pmat->tx = mdev->initial_matrix.tx;
499 pmat->ty = mdev->initial_matrix.ty;
500 }
501
502 /* Close a memory device, freeing the data area if appropriate. */
503 int
mem_close(gx_device * dev)504 mem_close(gx_device * dev)
505 {
506 gx_device_memory * const mdev = (gx_device_memory *)dev;
507
508 if (mdev->bitmap_memory != 0) {
509 gs_free_object(mdev->bitmap_memory, mdev->base, "mem_close");
510 /*
511 * The following assignment is strictly for the benefit of one
512 * client that is sloppy about using is_open properly.
513 */
514 mdev->base = 0;
515 } else if (mdev->line_pointer_memory != 0) {
516 gs_free_object(mdev->line_pointer_memory, mdev->line_ptrs,
517 "mem_close");
518 mdev->line_ptrs = 0; /* ibid. */
519 }
520 return 0;
521 }
522
/* Copy bits to a client. */
#undef chunk
#define chunk byte
int
mem_get_bits_rectangle(gx_device * dev, const gs_int_rect * prect,
                       gs_get_bits_params_t * params, gs_int_rect ** unread)
{
    gx_device_memory * const mdev = (gx_device_memory *)dev;
    gs_get_bits_options_t options = params->options;
    int x = prect->p.x, w = prect->q.x - x, y = prect->p.y, h = prect->q.y - y;

    if (options == 0) {
        /* No options requested: report the option combinations this
           device supports and fail with rangecheck, per the
           get_bits_rectangle protocol. */
        params->options =
            (GB_ALIGN_STANDARD | GB_ALIGN_ANY) |
            (GB_RETURN_COPY | GB_RETURN_POINTER) |
            (GB_OFFSET_0 | GB_OFFSET_SPECIFIED | GB_OFFSET_ANY) |
            (GB_RASTER_STANDARD | GB_RASTER_SPECIFIED | GB_RASTER_ANY) |
            GB_PACKING_CHUNKY | GB_COLORS_NATIVE | GB_ALPHA_NONE;
        return_error(gs_error_rangecheck);
    }
    if ((w <= 0) | (h <= 0)) {
        /* Empty rectangle: an error only if a dimension is negative. */
        if ((w | h) < 0)
            return_error(gs_error_rangecheck);
        return 0;
    }
    if (x < 0 || w > dev->width - x ||
        y < 0 || h > dev->height - y
        )
        return_error(gs_error_rangecheck);
    {
        gs_get_bits_params_t copy_params;
        byte *base = scan_line_base(mdev, y);
        int code;

        /* Describe the data exactly as stored: native colors, chunky
           packing, no alpha.  The raster counts as "standard" only if it
           equals the default raster for this width and depth. */
        copy_params.options =
            GB_COLORS_NATIVE | GB_PACKING_CHUNKY | GB_ALPHA_NONE |
            (mdev->raster ==
             bitmap_raster(mdev->width * mdev->color_info.depth) ?
             GB_RASTER_STANDARD : GB_RASTER_SPECIFIED);
        copy_params.raster = mdev->raster;
        /* Try to return a pointer straight into the bitmap first ... */
        code = gx_get_bits_return_pointer(dev, x, h, params,
                                          &copy_params, base);
        if (code >= 0)
            return code;
        /* ... otherwise copy/convert into the client's buffer. */
        return gx_get_bits_copy(dev, x, w, h, params, &copy_params, base,
                                gx_device_raster(dev, true));
    }
}
571
572 #if !arch_is_big_endian
573
/*
 * Swap byte order in a rectangular subset of a bitmap.  If store = true,
 * assume the rectangle will be overwritten, so don't swap any bytes where
 * it doesn't matter.  The caller has already done a fit_fill or fit_copy.
 * Note that the coordinates are specified in bits, not in terms of the
 * actual device depth.
 */
void
mem_swap_byte_rect(byte * base, uint raster, int x, int w, int h, bool store)
{
    int xbit = x & 31;          /* bit offset within the first 32-bit word */

    if (store) {
        if (xbit + w > 64) {    /* Operation spans multiple words. */
            /* Just swap the words at the left and right edges. */
            if (xbit != 0)
                mem_swap_byte_rect(base, raster, x, 1, h, false);
            /* Move x to the last bit of the rectangle. */
            x += w - 1;
            xbit = x & 31;
            if (xbit == 31)
                return;         /* right edge is word-aligned: nothing to do */
            w = 1;              /* fall through to swap just the right edge */
        }
    }
    /* Swap the entire rectangle (or what's left of it). */
    {
        byte *row = base + ((x >> 5) << 2);     /* first 32-bit word of each row */
        int nw = (xbit + w + 31) >> 5;          /* number of words per row */
        int ny;

        for (ny = h; ny > 0; row += raster, --ny) {
            int nx = nw;
            bits32 *pw = (bits32 *) row;

            do {
                bits32 w = *pw;

                /* Reverse the four bytes of the word. */
                *pw++ = (w >> 24) + ((w >> 8) & 0xff00) +
                    ((w & 0xff00) << 8) + (w << 24);
            }
            while (--nx);
        }
    }
}
618
/* Copy a word-oriented rectangle to the client, swapping bytes as needed. */
int
mem_word_get_bits_rectangle(gx_device * dev, const gs_int_rect * prect,
                            gs_get_bits_params_t * params, gs_int_rect ** unread)
{
    gx_device_memory * const mdev = (gx_device_memory *)dev;
    byte *src;
    uint dev_raster = gx_device_raster(dev, 1);
    int x = prect->p.x;
    int w = prect->q.x - x;
    int y = prect->p.y;
    int h = prect->q.y - y;
    int bit_x, bit_w;
    int code;

    fit_fill_xywh(dev, x, y, w, h);
    if (w <= 0 || h <= 0) {
        /*
         * It's easiest to just keep going with an empty rectangle.
         * We pass the original rectangle to mem_get_bits_rectangle,
         * so unread will be filled in correctly.
         */
        x = y = w = h = 0;
    }
    /* mem_swap_byte_rect takes bit coordinates, not pixels. */
    bit_x = x * dev->color_info.depth;
    bit_w = w * dev->color_info.depth;
    /* Swap to byte order, delegate the copy, then swap back in place. */
    src = scan_line_base(mdev, y);
    mem_swap_byte_rect(src, dev_raster, bit_x, bit_w, h, false);
    code = mem_get_bits_rectangle(dev, prect, params, unread);
    mem_swap_byte_rect(src, dev_raster, bit_x, bit_w, h, false);
    return code;
}
651
652 #endif /* !arch_is_big_endian */
653
/* Map a r-g-b color to a color index for a mapped color memory device */
/* (2, 4, or 8 bits per pixel.) */
/* This requires searching the palette. */
gx_color_index
mem_mapped_map_rgb_color(gx_device * dev, const gx_color_value cv[])
{
    gx_device_memory * const mdev = (gx_device_memory *)dev;
    byte br = gx_color_value_to_byte(cv[0]);

    register const byte *pptr = mdev->palette.data;
    int cnt = mdev->palette.size;
    const byte *which = 0;      /* initialized only to pacify gcc */
    int best = 256 * 3;         /* > any possible sum of component diffs */

    if (mdev->color_info.num_components != 1) {
        /* not 1 component, assume three */
        /* The comparison is rather simplistic, treating differences in */
        /* all components as equal. Better choices would be 'distance' */
        /* in HLS space or other, but these would be much slower. */
        /* At least exact matches will be found. */
        byte bg = gx_color_value_to_byte(cv[1]);
        byte bb = gx_color_value_to_byte(cv[2]);

        /* Scan the palette (packed R,G,B triples) for the smallest
           sum-of-absolute-differences, accumulating the sum in 'diff'
           and bailing out of a candidate as soon as it exceeds 'best'. */
        while ((cnt -= 3) >= 0) {
            register int diff = *pptr - br;

            if (diff < 0)
                diff = -diff;
            if (diff < best) {  /* quick rejection */
                int dg = pptr[1] - bg;

                if (dg < 0)
                    dg = -dg;
                if ((diff += dg) < best) {      /* quick rejection */
                    int db = pptr[2] - bb;

                    if (db < 0)
                        db = -db;
                    if ((diff += db) < best)
                        which = pptr, best = diff;
                }
            }
            if (diff == 0)      /* can't get any better than 0 diff */
                break;
            pptr += 3;
        }
    } else {
        /* Gray scale conversion. The palette is made of three equal */
        /* components, so this case is simpler. */
        while ((cnt -= 3) >= 0) {
            register int diff = *pptr - br;

            if (diff < 0)
                diff = -diff;
            if (diff < best) {  /* quick rejection */
                which = pptr, best = diff;
            }
            if (diff == 0)
                break;
            pptr += 3;
        }
    }
    /* Convert the byte offset of the best entry to a palette index. */
    return (gx_color_index) ((which - mdev->palette.data) / 3);
}
718
719 /* Map a color index to a r-g-b color for a mapped color memory device. */
720 int
mem_mapped_map_color_rgb(gx_device * dev,gx_color_index color,gx_color_value prgb[3])721 mem_mapped_map_color_rgb(gx_device * dev, gx_color_index color,
722 gx_color_value prgb[3])
723 {
724 gx_device_memory * const mdev = (gx_device_memory *)dev;
725 const byte *pptr = mdev->palette.data + (int)color * 3;
726
727 prgb[0] = gx_color_value_from_byte(pptr[0]);
728 prgb[1] = gx_color_value_from_byte(pptr[1]);
729 prgb[2] = gx_color_value_from_byte(pptr[2]);
730 return 0;
731 }
732
/*
 * Implement draw_thin_line using a distinguished procedure that serves
 * as the common marker for all memory devices.
 */
int
mem_draw_thin_line(gx_device *dev, fixed fx0, fixed fy0, fixed fx1, fixed fy1,
                   const gx_drawing_color *pdcolor,
                   gs_logical_operation_t lop,
                   fixed adjustx, fixed adjusty)
{
    /* Pure delegation: it is this procedure's identity, not its behavior,
       that gs_device_is_memory compares against. */
    return gx_default_draw_thin_line(dev, fx0, fy0, fx1, fy1, pdcolor, lop,
                                     adjustx, adjusty);
}
746