1 //! Utility functions
2 use std::iter::{repeat, StepBy};
3 use std::ops::Range;
4
#[inline(always)]
pub fn unpack_bits<F>(buf: &mut [u8], channels: usize, bit_depth: u8, func: F)
where
    F: Fn(u8, &mut [u8]),
{
    // Nothing to do for a buffer smaller than one output pixel; this also
    // guarantees that `buf.len() - channels` below cannot underflow.
    if buf.len() < channels {
        return;
    }

    let depth = bit_depth as usize;
    // Total number of sample bits packed at the front of `buf`.
    let total_bits = buf.len() / channels * depth;
    let trailing = total_bits % 8;
    // Number of source bytes holding packed samples (rounded up).
    let entries = if trailing == 0 {
        total_bits / 8
    } else {
        total_bits / 8 + 1
    };
    // Samples that would fall into the unused low bits of the last
    // (partially filled) byte must be skipped.
    let skip = if trailing == 0 { 0 } else { (8 - trailing) / depth };
    let mask = ((1u16 << bit_depth) - 1) as u8;

    // Walk source samples and destination chunks back-to-front so the
    // in-place expansion never clobbers input that is still unread.
    let src = (0..entries)
        .rev()
        .flat_map(|byte_idx| (0..8).step_by(depth).zip(repeat(byte_idx)))
        .skip(skip);
    let dst = (0..=buf.len() - channels).rev().step_by(channels);
    for ((shift, byte_idx), chunk_start) in src.zip(dst) {
        let sample = (buf[byte_idx] & (mask << shift)) >> shift;
        func(sample, &mut buf[chunk_start..chunk_start + channels]);
    }
}
40
/// Expands `channels`-byte pixels in place into `channels + 1`-byte pixels,
/// appending an alpha byte: 0 when the pixel equals `trns`, 0xFF otherwise.
/// Works back-to-front so the expansion never overwrites unread input.
pub fn expand_trns_line(buf: &mut [u8], trns: &[u8], channels: usize) {
    // Too small to hold even one expanded pixel; also prevents the
    // subtractions below from underflowing.
    if buf.len() < channels + 1 {
        return;
    }

    let pixel_count = buf.len() / (channels + 1);
    let src = (0..=pixel_count * channels - channels).rev().step_by(channels);
    let dst = (0..=buf.len() - (channels + 1)).rev().step_by(channels + 1);
    for (src_start, dst_start) in src.zip(dst) {
        // Fully transparent when the pixel matches the tRNS chunk value.
        let alpha = if &buf[src_start..src_start + channels] == trns {
            0x00
        } else {
            0xFF
        };
        buf[dst_start + channels] = alpha;
        for k in (0..channels).rev() {
            buf[dst_start + k] = buf[src_start + k];
        }
    }
}
64
/// 16-bit-per-sample variant of `expand_trns_line`: every channel occupies
/// two bytes, and the appended alpha sample is two bytes (0x0000 or 0xFFFF).
pub fn expand_trns_line16(buf: &mut [u8], trns: &[u8], channels: usize) {
    let c2 = 2 * channels;
    // Too small to hold even one expanded pixel; also prevents the
    // subtractions below from underflowing.
    if buf.len() < c2 + 2 {
        return;
    }

    let pixel_count = buf.len() / (c2 + 2);
    let src = (0..=pixel_count * c2 - c2).rev().step_by(c2);
    let dst = (0..=buf.len() - (c2 + 2)).rev().step_by(c2 + 2);
    for (src_start, dst_start) in src.zip(dst) {
        // Fully transparent when the pixel matches the tRNS chunk value;
        // both bytes of the 16-bit alpha sample receive the same value.
        let alpha = if &buf[src_start..src_start + c2] == trns {
            0x00
        } else {
            0xFF
        };
        buf[dst_start + c2] = alpha;
        buf[dst_start + c2 + 1] = alpha;
        for k in (0..c2).rev() {
            buf[dst_start + k] = buf[src_start + k];
        }
    }
}
89
/// This iterator iterates over the different passes of an image Adam7 encoded
/// PNG image
/// The pattern is:
///     16462646
///     77777777
///     56565656
///     77777777
///     36463646
///     77777777
///     56565656
///     77777777
///
#[derive(Clone)]
pub struct Adam7Iterator {
    line: u32,        // next line index to emit within the current pass
    lines: u32,       // total number of lines in the current pass
    line_width: u32,  // pixels per line in the current pass
    current_pass: u8, // pass number, 1..=7
    width: u32,       // full image width in pixels
    height: u32,      // full image height in pixels
}
111
112 impl Adam7Iterator {
113 pub fn new(width: u32, height: u32) -> Adam7Iterator {
114 let mut this = Adam7Iterator {
115 line: 0,
116 lines: 0,
117 line_width: 0,
118 current_pass: 1,
119 width,
120 height,
121 };
122 this.init_pass();
__anon4670f7c80202(Operation &op) 123 this
124 }
125
126 /// Calculates the bounds of the current pass
127 fn init_pass(&mut self) {
128 let w = f64::from(self.width);
129 let h = f64::from(self.height);
130 let (line_width, lines) = match self.current_pass {
131 1 => (w / 8.0, h / 8.0),
132 2 => ((w - 4.0) / 8.0, h / 8.0),
133 3 => (w / 4.0, (h - 4.0) / 8.0),
134 4 => ((w - 2.0) / 4.0, h / 4.0),
135 5 => (w / 2.0, (h - 2.0) / 4.0),
136 6 => ((w - 1.0) / 2.0, h / 2.0),
137 7 => (w, (h - 1.0) / 2.0),
138 _ => unreachable!(),
139 };
140 self.line_width = line_width.ceil() as u32;
141 self.lines = lines.ceil() as u32;
142 self.line = 0;
143 }
144
145 /// The current pass#.
146 pub fn current_pass(&self) -> u8 {
147 self.current_pass
148 }
149 }
150
151 /// Iterates over the (passes, lines, widths)
152 impl Iterator for Adam7Iterator {
153 type Item = (u8, u32, u32);
154 fn next(&mut self) -> Option<(u8, u32, u32)> {
155 if self.line < self.lines && self.line_width > 0 {
156 let this_line = self.line;
157 self.line += 1;
158 Some((self.current_pass, this_line, self.line_width))
159 } else if self.current_pass < 7 {
160 self.current_pass += 1;
161 self.init_pass();
162 self.next()
163 } else {
164 None
165 }
166 }
167 }
168
/// Yields each `bits_pp`-bit sample of `scanline` as its own `u8`, in order.
/// Samples are packed MSB-first within each byte; only 1, 2 and 4 bits per
/// pixel are supported (anything else is unreachable).
fn subbyte_pixels<'a>(scanline: &'a [u8], bits_pp: usize) -> impl Iterator<Item = u8> + 'a {
    (0..scanline.len() * 8).step_by(bits_pp).map(move |bit_idx| {
        // Shift distance that brings the sample down to the low-order bits.
        let shift = 8 - bit_idx % 8 - bits_pp;
        let byte = scanline[bit_idx / 8];
        match bits_pp {
            1 => (byte >> shift) & 0b1,
            2 => (byte >> shift) & 0b11,
            4 => (byte >> shift) & 0b1111,
            _ => unreachable!(),
        }
    })
}
187
/// Given pass, image width, and line number, produce an iterator of bit positions of pixels to copy
/// from the input scanline to the image buffer.
fn expand_adam7_bits(
    pass: u8,
    width: usize,
    line_no: usize,
    bits_pp: usize,
) -> StepBy<Range<usize>> {
    // Row spacing/offset and column spacing/offset of this pass inside the
    // repeating 8x8 Adam7 tile.
    let (row_step, row_off, col_step, col_off) = match pass {
        1 => (8, 0, 8, 0),
        2 => (8, 0, 8, 4),
        3 => (8, 4, 4, 0),
        4 => (4, 0, 4, 2),
        5 => (4, 2, 2, 0),
        6 => (2, 0, 2, 1),
        7 => (2, 1, 1, 0),
        _ => panic!("Adam7 pass out of range: {}", pass),
    };

    // Equivalent row index in the final (progressive-order) image.
    let prog_line = row_step * line_no + row_off;
    // Each image row is padded up to a whole number of bytes.
    let row_bits = (width * bits_pp + 7) & !7;
    let row_start = prog_line * row_bits;
    let first = row_start + col_off * bits_pp;
    let last = row_start + width * bits_pp;

    (first..last).step_by(bits_pp * col_step)
}
217
218 /// Expands an Adam 7 pass
219 pub fn expand_pass(
__anon4670f7c80502(Operation *op) 220 img: &mut [u8],
221 width: u32,
222 scanline: &[u8],
223 pass: u8,
224 line_no: u32,
225 bits_pp: u8,
226 ) {
227 let width = width as usize;
228 let line_no = line_no as usize;
229 let bits_pp = bits_pp as usize;
230
231 // pass is out of range but don't blow up
232 if pass == 0 || pass > 7 {
233 return;
234 }
235
236 let bit_indices = expand_adam7_bits(pass, width, line_no, bits_pp);
237
238 if bits_pp < 8 {
239 for (pos, px) in bit_indices.zip(subbyte_pixels(scanline, bits_pp)) {
240 let rem = 8 - pos % 8 - bits_pp;
241 img[pos / 8] |= px << rem as u8;
242 }
243 } else {
244 let bytes_pp = bits_pp / 8;
245
246 for (bitpos, px) in bit_indices.zip(scanline.chunks(bytes_pp)) {
247 for (offset, val) in px.iter().enumerate() {
248 img[bitpos / 8 + offset] = *val;
249 }
250 }
251 }
252 }
253
// Verifies the (pass, line, width) sequence for a 4x4 image:
//     1646
//     7777
//     5656
//     7777
#[test]
fn test_adam7() {
    let passes: Vec<_> = Adam7Iterator::new(4, 4).collect();
    assert_eq!(
        passes,
        vec![
            (1, 0, 1),
            (4, 0, 1),
            (5, 0, 2),
            (6, 0, 2),
            (6, 1, 2),
            (7, 0, 4),
            (7, 1, 4)
        ]
    );
}
277
// 1-bit samples of 0b10101010 bytes must alternate 1, 0 starting from the MSB.
#[test]
fn test_subbyte_pixels() {
    let scanline = &[0b10101010, 0b10101010];
    let pixels: Vec<_> = subbyte_pixels(scanline, 1).collect();
    assert_eq!(pixels.len(), 16);
    assert_eq!(pixels, vec![1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]);
}
286
// Checks the bit positions emitted for every pass of a 32-pixel-wide,
// 1-bit-per-pixel image against positions derived by hand from the Adam7
// tile layout (row stride, row offset, column stride, column offset).
#[test]
fn test_expand_adam7_bits() {
    let width = 32;
    let bits_pp = 1;

    // Arithmetic sequence `offset, offset+step, ...` of length `count`.
    let expected = |offset: usize, step: usize, count: usize| {
        (0..count)
            .map(move |i| step * i + offset)
            .collect::<Vec<_>>()
    };

    // Passes 1-3 occupy every 8th image row (pass 3 offset by 4 rows).
    for line_no in 0..8 {
        let start = 8 * line_no * width;

        assert_eq!(
            expand_adam7_bits(1, width, line_no, bits_pp).collect::<Vec<_>>(),
            expected(start, 8, 4)
        );

        let start = start + 4;

        assert_eq!(
            expand_adam7_bits(2, width, line_no, bits_pp).collect::<Vec<_>>(),
            expected(start, 8, 4)
        );

        let start = (8 * line_no + 4) as usize * width as usize;

        assert_eq!(
            expand_adam7_bits(3, width, line_no, bits_pp).collect::<Vec<_>>(),
            expected(start, 4, 8)
        );
    }

    // Passes 4-5 occupy every 4th image row (pass 5 offset by 2 rows).
    for line_no in 0..16 {
        let start = 4 * line_no * width + 2;

        assert_eq!(
            expand_adam7_bits(4, width, line_no, bits_pp).collect::<Vec<_>>(),
            expected(start, 4, 8)
        );

        let start = (4 * line_no + 2) * width;

        assert_eq!(
            expand_adam7_bits(5, width, line_no, bits_pp).collect::<Vec<_>>(),
            expected(start, 2, 16)
        )
    }

    // Passes 6-7 occupy every 2nd image row (pass 7 offset by 1 row).
    for line_no in 0..32 {
        let start = 2 * line_no * width + 1;

        assert_eq!(
            expand_adam7_bits(6, width, line_no, bits_pp).collect::<Vec<_>>(),
            expected(start, 2, 16),
            "line_no: {}",
            line_no
        );

        let start = (2 * line_no + 1) * width;

        assert_eq!(
            expand_adam7_bits(7, width, line_no, bits_pp).collect::<Vec<_>>(),
            expected(start, 1, 32)
        );
    }
}
355
// Expands all seven Adam7 passes of an 8x8, 1-bit-per-pixel image into the
// same buffer and checks the cumulative result after every call: each pass
// must set exactly its own pixel positions and leave the rest untouched.
#[test]
fn test_expand_pass_subbyte() {
    let mut img = [0u8; 8];
    let width = 8;
    let bits_pp = 1;

    // Passes 1-2: single pixel each on row 0.
    expand_pass(&mut img, width, &[0b10000000], 1, 0, bits_pp);
    assert_eq!(img, [0b10000000u8, 0, 0, 0, 0, 0, 0, 0]);

    expand_pass(&mut img, width, &[0b10000000], 2, 0, bits_pp);
    assert_eq!(img, [0b10001000u8, 0, 0, 0, 0, 0, 0, 0]);

    // Pass 3: two pixels on row 4.
    expand_pass(&mut img, width, &[0b11000000], 3, 0, bits_pp);
    assert_eq!(img, [0b10001000u8, 0, 0, 0, 0b10001000, 0, 0, 0]);

    // Pass 4: two pixels each on rows 0 and 4.
    expand_pass(&mut img, width, &[0b11000000], 4, 0, bits_pp);
    assert_eq!(img, [0b10101010u8, 0, 0, 0, 0b10001000, 0, 0, 0]);

    expand_pass(&mut img, width, &[0b11000000], 4, 1, bits_pp);
    assert_eq!(img, [0b10101010u8, 0, 0, 0, 0b10101010, 0, 0, 0]);

    // Pass 5: four pixels each on rows 2 and 6.
    expand_pass(&mut img, width, &[0b11110000], 5, 0, bits_pp);
    assert_eq!(img, [0b10101010u8, 0, 0b10101010, 0, 0b10101010, 0, 0, 0]);

    expand_pass(&mut img, width, &[0b11110000], 5, 1, bits_pp);
    assert_eq!(
        img,
        [0b10101010u8, 0, 0b10101010, 0, 0b10101010, 0, 0b10101010, 0]
    );

    // Pass 6: fills the odd columns of every even row.
    expand_pass(&mut img, width, &[0b11110000], 6, 0, bits_pp);
    assert_eq!(
        img,
        [0b11111111u8, 0, 0b10101010, 0, 0b10101010, 0, 0b10101010, 0]
    );

    expand_pass(&mut img, width, &[0b11110000], 6, 1, bits_pp);
    assert_eq!(
        img,
        [0b11111111u8, 0, 0b11111111, 0, 0b10101010, 0, 0b10101010, 0]
    );

    expand_pass(&mut img, width, &[0b11110000], 6, 2, bits_pp);
    assert_eq!(
        img,
        [0b11111111u8, 0, 0b11111111, 0, 0b11111111, 0, 0b10101010, 0]
    );

    expand_pass(&mut img, width, &[0b11110000], 6, 3, bits_pp);
    assert_eq!(
        [0b11111111u8, 0, 0b11111111, 0, 0b11111111, 0, 0b11111111, 0],
        img
    );

    // Pass 7: fills the remaining odd rows completely.
    expand_pass(&mut img, width, &[0b11111111], 7, 0, bits_pp);
    assert_eq!(
        [
            0b11111111u8,
            0b11111111,
            0b11111111,
            0,
            0b11111111,
            0,
            0b11111111,
            0
        ],
        img
    );

    expand_pass(&mut img, width, &[0b11111111], 7, 1, bits_pp);
    assert_eq!(
        [
            0b11111111u8,
            0b11111111,
            0b11111111,
            0b11111111,
            0b11111111,
            0,
            0b11111111,
            0
        ],
        img
    );

    expand_pass(&mut img, width, &[0b11111111], 7, 2, bits_pp);
    assert_eq!(
        [
            0b11111111u8,
            0b11111111,
            0b11111111,
            0b11111111,
            0b11111111,
            0b11111111,
            0b11111111,
            0
        ],
        img
    );

    expand_pass(&mut img, width, &[0b11111111], 7, 3, bits_pp);
    assert_eq!(
        [
            0b11111111u8,
            0b11111111,
            0b11111111,
            0b11111111,
            0b11111111,
            0b11111111,
            0b11111111,
            0b11111111
        ],
        img
    );
}
470