1 use crate::color::Rgb;
2 use crate::error::ImageResult;
3 use crate::hdr::{rgbe8, Rgbe8Pixel, SIGNATURE};
4 use std::io::{Result, Write};
5 use std::cmp::Ordering;
6
7 /// Radiance HDR encoder
/// Radiance HDR encoder
///
/// Writes the RGBE (`.hdr`) signature, header and pixel data to the
/// wrapped writer via [`HdrEncoder::encode`].
pub struct HdrEncoder<W: Write> {
    /// Destination writer that receives the encoded byte stream.
    w: W,
}
11
/// HDR Encoder
///
/// A deprecated alias of [`HdrEncoder`], kept for backward compatibility.
///
/// TODO: remove
///
/// [`HdrEncoder`]: struct.HdrEncoder.html
#[allow(dead_code)]
#[deprecated(note = "Use `HdrEncoder` instead")]
pub type HDREncoder<R> = HdrEncoder<R>;
22
23 impl<W: Write> HdrEncoder<W> {
24 /// Creates encoder
new(w: W) -> HdrEncoder<W>25 pub fn new(w: W) -> HdrEncoder<W> {
26 HdrEncoder { w }
27 }
28
29 /// Encodes the image ```data```
30 /// that has dimensions ```width``` and ```height```
encode(mut self, data: &[Rgb<f32>], width: usize, height: usize) -> ImageResult<()>31 pub fn encode(mut self, data: &[Rgb<f32>], width: usize, height: usize) -> ImageResult<()> {
32 assert!(data.len() >= width * height);
33 let w = &mut self.w;
34 w.write_all(SIGNATURE)?;
35 w.write_all(b"\n")?;
36 w.write_all(b"# Rust HDR encoder\n")?;
37 w.write_all(b"FORMAT=32-bit_rle_rgbe\n\n")?;
38 w.write_all(format!("-Y {} +X {}\n", height, width).as_bytes())?;
39
40 if width < 8 || width > 32_768 {
41 for &pix in data {
42 write_rgbe8(w, to_rgbe8(pix))?;
43 }
44 } else {
45 // new RLE marker contains scanline width
46 let marker = rgbe8(2, 2, (width / 256) as u8, (width % 256) as u8);
47 // buffers for encoded pixels
48 let mut bufr = vec![0; width];
49 let mut bufg = vec![0; width];
50 let mut bufb = vec![0; width];
51 let mut bufe = vec![0; width];
52 let mut rle_buf = vec![0; width];
53 for scanline in data.chunks(width) {
54 for ((((r, g), b), e), &pix) in bufr.iter_mut()
55 .zip(bufg.iter_mut())
56 .zip(bufb.iter_mut())
57 .zip(bufe.iter_mut())
58 .zip(scanline.iter())
59 {
60 let cp = to_rgbe8(pix);
61 *r = cp.c[0];
62 *g = cp.c[1];
63 *b = cp.c[2];
64 *e = cp.e;
65 }
66 write_rgbe8(w, marker)?; // New RLE encoding marker
67 rle_buf.clear();
68 rle_compress(&bufr[..], &mut rle_buf);
69 w.write_all(&rle_buf[..])?;
70 rle_buf.clear();
71 rle_compress(&bufg[..], &mut rle_buf);
72 w.write_all(&rle_buf[..])?;
73 rle_buf.clear();
74 rle_compress(&bufb[..], &mut rle_buf);
75 w.write_all(&rle_buf[..])?;
76 rle_buf.clear();
77 rle_compress(&bufe[..], &mut rle_buf);
78 w.write_all(&rle_buf[..])?;
79 }
80 }
81 Ok(())
82 }
83 }
84
/// One chunk of a component plane as seen by the RLE encoder:
/// either a run of identical bytes or a stretch of literal bytes.
#[derive(Debug, PartialEq, Eq)]
enum RunOrNot {
    /// `Run(value, length)`: `length` repetitions of `value`.
    Run(u8, usize),
    /// `Norun(start, length)`: `length` literal bytes beginning at index
    /// `start` of the source slice.
    Norun(usize, usize),
}
use self::RunOrNot::{Norun, Run};
91
/// Maximum encodable run length: a run is emitted as `128 + len`,
/// so `len` must fit in 7 bits.
const RUN_MAX_LEN: usize = 127;
/// Maximum literal (non-run) chunk length: the length is stored directly
/// in the leading byte, which must not exceed 128 to remain
/// distinguishable from run markers.
const NORUN_MAX_LEN: usize = 128;
94
/// Iterator that scans a byte slice into `Run`s (3+ equal bytes, capped
/// at `RUN_MAX_LEN`) and short `Norun` chunks of one or two bytes.
struct RunIterator<'a> {
    /// Slice being scanned.
    data: &'a [u8],
    /// Index of the first byte not yet consumed.
    curidx: usize,
}
99
100 impl<'a> RunIterator<'a> {
new(data: &'a [u8]) -> RunIterator<'a>101 fn new(data: &'a [u8]) -> RunIterator<'a> {
102 RunIterator { data, curidx: 0 }
103 }
104 }
105
106 impl<'a> Iterator for RunIterator<'a> {
107 type Item = RunOrNot;
108
next(&mut self) -> Option<Self::Item>109 fn next(&mut self) -> Option<Self::Item> {
110 if self.curidx == self.data.len() {
111 None
112 } else {
113 let cv = self.data[self.curidx];
114 let crun = self.data[self.curidx..]
115 .iter()
116 .take_while(|&&v| v == cv)
117 .take(RUN_MAX_LEN)
118 .count();
119 let ret = if crun > 2 {
120 Run(cv, crun)
121 } else {
122 Norun(self.curidx, crun)
123 };
124 self.curidx += crun;
125 Some(ret)
126 }
127 }
128 }
129
/// Adapter over `RunIterator` that merges consecutive `Norun` items into
/// chunks of at most `NORUN_MAX_LEN` bytes, passing `Run`s through.
struct NorunCombineIterator<'a> {
    /// Underlying run/norun scanner.
    runiter: RunIterator<'a>,
    /// Item held back from a previous step, pending combination or emission.
    prev: Option<RunOrNot>,
}
134
135 impl<'a> NorunCombineIterator<'a> {
new(data: &'a [u8]) -> NorunCombineIterator<'a>136 fn new(data: &'a [u8]) -> NorunCombineIterator<'a> {
137 NorunCombineIterator {
138 runiter: RunIterator::new(data),
139 prev: None,
140 }
141 }
142 }
143
144 // Combines sequential noruns produced by RunIterator
145 impl<'a> Iterator for NorunCombineIterator<'a> {
146 type Item = RunOrNot;
next(&mut self) -> Option<Self::Item>147 fn next(&mut self) -> Option<Self::Item> {
148 loop {
149 match self.prev.take() {
150 Some(Run(c, len)) => {
151 // Just return stored run
152 return Some(Run(c, len));
153 }
154 Some(Norun(idx, len)) => {
155 // Let's see if we need to continue norun
156 match self.runiter.next() {
157 Some(Norun(_, len1)) => {
158 // norun continues
159 let clen = len + len1; // combined length
160 match clen.cmp(&NORUN_MAX_LEN) {
161 Ordering::Equal => return Some(Norun(idx, clen)),
162 Ordering::Greater => {
163 // combined norun exceeds maximum length. store extra part of norun
164 self.prev = Some(Norun(idx + NORUN_MAX_LEN, clen - NORUN_MAX_LEN));
165 // then return maximal norun
166 return Some(Norun(idx, NORUN_MAX_LEN));
167 }
168 Ordering::Less => {
169 // len + len1 < NORUN_MAX_LEN
170 self.prev = Some(Norun(idx, len + len1));
171 // combine and continue loop
172 }
173 }
174 }
175 Some(Run(c, len1)) => {
176 // Run encountered. Store it
177 self.prev = Some(Run(c, len1));
178 return Some(Norun(idx, len)); // and return combined norun
179 }
180 None => {
181 // End of sequence
182 return Some(Norun(idx, len)); // return combined norun
183 }
184 }
185 } // End match self.prev.take() == Some(NoRun())
186 None => {
187 // No norun to combine
188 match self.runiter.next() {
189 Some(Norun(idx, len)) => {
190 self.prev = Some(Norun(idx, len));
191 // store for combine and continue the loop
192 }
193 Some(Run(c, len)) => {
194 // Some run. Just return it
195 return Some(Run(c, len));
196 }
197 None => {
198 // That's all, folks
199 return None;
200 }
201 }
202 } // End match self.prev.take() == None
203 } // End match
204 } // End loop
205 }
206 }
207
208 // Appends RLE compressed ```data``` to ```rle```
rle_compress(data: &[u8], rle: &mut Vec<u8>)209 fn rle_compress(data: &[u8], rle: &mut Vec<u8>) {
210 rle.clear();
211 if data.is_empty() {
212 rle.push(0); // Technically correct. It means read next 0 bytes.
213 return;
214 }
215 // Task: split data into chunks of repeating (max 127) and non-repeating bytes (max 128)
216 // Prepend non-repeating chunk with its length
217 // Replace repeating byte with (run length + 128) and the byte
218 for rnr in NorunCombineIterator::new(data) {
219 match rnr {
220 Run(c, len) => {
221 assert!(len <= 127);
222 rle.push(128u8 + len as u8);
223 rle.push(c);
224 }
225 Norun(idx, len) => {
226 assert!(len <= 128);
227 rle.push(len as u8);
228 rle.extend_from_slice(&data[idx..idx + len]);
229 }
230 }
231 }
232 }
233
write_rgbe8<W: Write>(w: &mut W, v: Rgbe8Pixel) -> Result<()>234 fn write_rgbe8<W: Write>(w: &mut W, v: Rgbe8Pixel) -> Result<()> {
235 w.write_all(&[v.c[0], v.c[1], v.c[2], v.e])
236 }
237
238 /// Converts ```Rgb<f32>``` into ```RGBE8Pixel```
to_rgbe8(pix: Rgb<f32>) -> Rgbe8Pixel239 pub fn to_rgbe8(pix: Rgb<f32>) -> Rgbe8Pixel {
240 let pix = pix.0;
241 let mx = f32::max(pix[0], f32::max(pix[1], pix[2]));
242 if mx <= 0.0 {
243 Rgbe8Pixel { c: [0, 0, 0], e: 0 }
244 } else {
245 // let (frac, exp) = mx.frexp(); // unstable yet
246 let exp = mx.log2().floor() as i32 + 1;
247 let mul = f32::powi(2.0, exp);
248 let mut conv = [0u8; 3];
249 for (cv, &sv) in conv.iter_mut().zip(pix.iter()) {
250 *cv = f32::trunc(sv / mul * 256.0) as u8;
251 }
252 Rgbe8Pixel {
253 c: conv,
254 e: (exp + 128) as u8,
255 }
256 }
257 }
258
#[test]
fn to_rgbe8_test() {
    use crate::hdr::rgbe8;
    // Pixels expected to survive an RGBE -> f32 -> RGBE round trip exactly.
    let test_cases = vec![rgbe8(0, 0, 0, 0), rgbe8(1, 1, 128, 128)];
    for &pix in &test_cases {
        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
    }
    // Pixels whose maximal component is in 128..255 are normalized, so the
    // round trip should reproduce them bit-for-bit.
    for mc in 128..255 {
        // TODO: use inclusive range when stable
        let pix = rgbe8(mc, mc, mc, 100);
        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
        let pix = rgbe8(mc, 0, mc, 130);
        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
        let pix = rgbe8(0, 0, mc, 140);
        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
        let pix = rgbe8(1, 0, mc, 150);
        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
        let pix = rgbe8(1, mc, 10, 128);
        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
        for c in 0..255 {
            // Radiance HDR seems to be pre IEEE 754.
            // exponent can be -128 (represented as 0u8), so some colors cannot be represented in normalized f32
            // Let's exclude exponent value of -128 (0u8) from testing
            let pix = rgbe8(1, mc, c, if c == 0 { 1 } else { c });
            assert_eq!(pix, to_rgbe8(pix.to_hdr()));
        }
    }
    // Relative error metric: maximal per-channel difference divided by the
    // maximal channel value across both colors (0 when both are black).
    fn relative_dist(a: Rgb<f32>, b: Rgb<f32>) -> f32 {
        // maximal difference divided by maximal value
        let max_diff = a.0
            .iter()
            .zip(b.0.iter())
            .fold(0.0, |diff, (&a, &b)| f32::max(diff, (a - b).abs()));
        let max_val = a.0
            .iter()
            .chain(b.0.iter())
            .fold(0.0, |maxv, &a| f32::max(maxv, a));
        if max_val == 0.0 {
            0.0
        } else {
            max_diff / max_val
        }
    }
    // Arbitrary f32 colors across many orders of magnitude: the round trip
    // is lossy but must stay within the format's precision.
    let test_values = vec![
        0.000_001, 0.000_02, 0.000_3, 0.004, 0.05, 0.6, 7.0, 80.0, 900.0, 1_000.0, 20_000.0,
        300_000.0,
    ];
    for &r in &test_values {
        for &g in &test_values {
            for &b in &test_values {
                let c1 = Rgb([r, g, b]);
                let c2 = to_rgbe8(c1).to_hdr();
                let rel_dist = relative_dist(c1, c2);
                // Maximal value is normalized to the range 128..256, thus we have 1/128 precision
                assert!(
                    rel_dist <= 1.0 / 128.0,
                    "Relative distance ({}) exceeds 1/128 for {:?} and {:?}",
                    rel_dist,
                    c1,
                    c2
                );
            }
        }
    }
}
324
#[test]
fn runiterator_test() {
    // Collects every item a RunIterator yields for `data`.
    fn runs_of(data: &[u8]) -> Vec<RunOrNot> {
        RunIterator::new(data).collect()
    }

    // Empty input yields nothing.
    assert!(runs_of(&[]).is_empty());
    // One or two equal bytes are too short for a run.
    assert_eq!(runs_of(&[5]), vec![Norun(0, 1)]);
    assert_eq!(runs_of(&[1, 1]), vec![Norun(0, 2)]);
    // Three equal bytes form the shortest run.
    assert_eq!(runs_of(&[0, 0, 0]), vec![Run(0u8, 3)]);
    // Adjacent short chunks stay separate at this layer.
    assert_eq!(runs_of(&[0, 0, 1, 1]), vec![Norun(0, 2), Norun(2, 2)]);
    assert_eq!(runs_of(&[0, 0, 0, 1, 1]), vec![Run(0u8, 3), Norun(3, 2)]);
    assert_eq!(runs_of(&[1, 2, 2, 2]), vec![Norun(0, 1), Run(2u8, 3)]);
    assert_eq!(runs_of(&[1, 1, 2, 2, 2]), vec![Norun(0, 2), Run(2u8, 3)]);
    // Runs are capped at 127 bytes; the leftover is scanned again.
    assert_eq!(runs_of(&[2; 128]), vec![Run(2u8, 127), Norun(127, 1)]);
    assert_eq!(runs_of(&[2; 129]), vec![Run(2u8, 127), Norun(127, 2)]);
    assert_eq!(runs_of(&[2; 130]), vec![Run(2u8, 127), Run(2u8, 3)]);
}
378
#[test]
fn noruncombine_test() {
    // Collects everything a NorunCombineIterator yields for `data`.
    fn combined(data: &[u8]) -> Vec<RunOrNot> {
        NorunCombineIterator::new(data).collect()
    }
    // Concatenates two vectors.
    fn a<T>(mut v: Vec<T>, mut other: Vec<T>) -> Vec<T> {
        v.append(&mut other);
        v
    }

    // Trivial inputs pass through unchanged.
    assert!(combined(&[]).is_empty());
    assert_eq!(combined(&[1]), vec![Norun(0, 1)]);
    assert_eq!(combined(&[2, 2]), vec![Norun(0, 2)]);
    assert_eq!(combined(&[3, 3, 3]), vec![Run(3, 3)]);
    assert_eq!(combined(&[4, 4, 3, 3, 3]), vec![Norun(0, 2), Run(3, 3)]);

    // A long run is split into maximal 127-byte runs plus a remainder.
    assert_eq!(
        combined(&[40; 400]),
        vec![Run(40, 127), Run(40, 127), Run(40, 127), Run(40, 19)]
    );

    // Short chunks after a run merge into a single norun.
    let v = a(a(vec![5; 3], vec![6; 129]), vec![7, 3, 7, 10, 255]);
    assert_eq!(
        combined(&v),
        vec![Run(5, 3), Run(6, 127), Norun(130, 7)]
    );

    let v = a(a(vec![5; 2], vec![6; 129]), vec![7, 3, 7, 7, 255]);
    assert_eq!(
        combined(&v),
        vec![Norun(0, 2), Run(6, 127), Norun(129, 7)]
    );

    // An alternating sequence (no runs at all) is combined and split at
    // the 128-byte norun maximum.
    let v: Vec<_> = ::std::iter::repeat(())
        .flat_map(|_| (0..2))
        .take(257)
        .collect();
    assert_eq!(
        combined(&v),
        vec![Norun(0, 128), Norun(128, 128), Norun(256, 1)]
    );
}
443