/*
    RawSpeed - RAW file decoder.

    Copyright (C) 2009-2014 Klaus Post
    Copyright (C) 2014-2015 Pedro Côrte-Real
    Copyright (C) 2017-2019 Roman Lebedev
    Copyright (C) 2019 Robert Bridge

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
    License as published by the Free Software Foundation; either
    version 2 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with this library; if not, write to the Free Software
    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

#include "decoders/IiqDecoder.h"
#include "common/Array2DRef.h"                  // for Array2DRef
#include "common/Mutex.h"                       // for MutexLocker
#include "common/Point.h"                       // for iPoint2D
#include "common/Spline.h"                      // for Spline, Spline<>::va...
#include "decoders/RawDecoder.h"                // for RawDecoder::(anonymous)
#include "decoders/RawDecoderException.h"       // for ThrowRDE
#include "decompressors/PhaseOneDecompressor.h" // for PhaseOneStrip, Phase...
#include "io/Buffer.h"                          // for Buffer, DataBuffer
#include "io/ByteStream.h"                      // for ByteStream
#include "io/Endianness.h"                      // for Endianness, Endianne...
#include "metadata/Camera.h"                    // for Camera
#include "metadata/CameraMetaData.h"            // for CameraMetaData
#include "metadata/ColorFilterArray.h"          // for ColorFilterArray
#include "tiff/TiffIFD.h"                       // for TiffID, TiffRootIFD
#include <algorithm>                            // for adjacent_find, gener...
#include <array>                                // for array, array<>::cons...
#include <cassert>                              // for assert
#include <cinttypes>                            // for PRIu64
#include <cmath>                                // for lround
#include <cstdlib>                              // for abs
#include <functional>                           // for greater_equal
#include <iterator>                             // for advance, next, begin
#include <memory>                               // for unique_ptr
#include <string>                               // for operator==, string
#include <utility>                              // for move
#include <vector>                               // for vector

namespace rawspeed {

bool IiqDecoder::isAppropriateDecoder(const Buffer& file) {
  const DataBuffer db(file, Endianness::little);

  // The IIQ magic ("IIII"); present in all IIQ raws.
  return db.get<uint32_t>(8) == 0x49494949;
}

bool IiqDecoder::isAppropriateDecoder(const TiffRootIFD* rootIFD,
                                      const Buffer& file) {
  const auto id = rootIFD->getID();
  const std::string& make = id.make;

  return IiqDecoder::isAppropriateDecoder(file) &&
         (make == "Phase One A/S" || make == "Phase One" || make == "Leaf");
}

// FIXME: this is very close to SamsungV0Decompressor::computeStripes()
std::vector<PhaseOneStrip>
IiqDecoder::computeSripes(const Buffer& raw_data,
                          std::vector<IiqOffset>&& offsets, uint32_t height) {
  assert(height > 0);
  assert(offsets.size() == (1 + height));

  ByteStream bs(DataBuffer(raw_data, Endianness::little));

  // The offsets are not guaranteed to be in monotonically increasing order.
  // For each element of 'offsets', we need to find the element that specifies
  // the next larger offset; only by subtracting those two offsets do we get
  // the slice size.

  std::sort(offsets.begin(), offsets.end(),
            [](const IiqOffset& a, const IiqOffset& b) {
              if (a.offset == b.offset && &a != &b)
                ThrowRDE("Two identical offsets found. Corrupt raw.");
              return a.offset < b.offset;
            });

  std::vector<PhaseOneStrip> slices;
  slices.reserve(height);

  auto offset_iterator = std::begin(offsets);
  bs.skipBytes(offset_iterator->offset);

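  // Each pair of consecutive (now sorted) offsets bounds one strip's data.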
  auto next_offset_iterator = std::next(offset_iterator);
  while (next_offset_iterator < std::end(offsets)) {
    assert(next_offset_iterator->offset > offset_iterator->offset);
    const auto size = next_offset_iterator->offset - offset_iterator->offset;
    assert(size > 0);

    slices.emplace_back(offset_iterator->n, bs.getStream(size));

    std::advance(offset_iterator, 1);
    std::advance(next_offset_iterator, 1);
  }

  assert(slices.size() == height);

  return slices;
}

RawImage IiqDecoder::decodeRawInternal() {
  const Buffer buf(mFile.getSubView(8));
  const DataBuffer db(buf, Endianness::little);
  ByteStream bs(db);

  bs.skipBytes(4); // Phase One magic
  bs.skipBytes(4); // padding?

  const auto origPos = bs.getPosition();

  const uint32_t entries_offset = bs.getU32();

  bs.setPosition(entries_offset);

  const uint32_t entries_count = bs.getU32();
  bs.skipBytes(4); // ???

  // This sub-stream covers all the entries, 16 bytes each.
  ByteStream es(bs.getStream(entries_count, 16));

  bs.setPosition(origPos);

  uint32_t width = 0;
  uint32_t height = 0;
  uint32_t split_row = 0;
  uint32_t split_col = 0;

  Buffer raw_data;
  ByteStream block_offsets;
  ByteStream wb;
  ByteStream correction_meta_data;

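  // Each IIQ entry is 16 bytes: tag, type, length, and a value that is either
  // the data itself or an offset into the stream.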
  for (uint32_t entry = 0; entry < entries_count; entry++) {
    const uint32_t tag = es.getU32();
    es.skipBytes(4); // type
    const uint32_t len = es.getU32();
    const uint32_t data = es.getU32();

    switch (tag) {
    case 0x107:
      wb = bs.getSubStream(data, len);
      break;
    case 0x108:
      width = data;
      break;
    case 0x109:
      height = data;
      break;
    case 0x10f:
      raw_data = bs.getSubView(data, len);
      break;
    case 0x110:
      correction_meta_data = bs.getSubStream(data);
      break;
    case 0x21c:
      // they are not guaranteed to be sequential!
      block_offsets = bs.getSubStream(data, len);
      break;
    case 0x21d:
      black_level = data >> 2;
      break;
    case 0x222:
      split_col = data;
      break;
    case 0x224:
      split_row = data;
      break;
    default:
      // FIXME: is there a "block_sizes" entry?
      break;
    }
  }

  // FIXME: could be wrong. max "active pixels" in "Sensor+" mode - "101 MP"
  if (width == 0 || height == 0 || width > 11976 || height > 8854)
    ThrowRDE("Unexpected image dimensions found: (%u; %u)", width, height);

  if (split_col > width || split_row > height)
    ThrowRDE("Invalid sensor quadrant split values (%u, %u)", split_row,
             split_col);

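  // The block offsets table holds one 32-bit offset per row.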
  block_offsets = block_offsets.getStream(height, sizeof(uint32_t));

  std::vector<IiqOffset> offsets;
  offsets.reserve(1 + height);

  for (uint32_t row = 0; row < height; row++)
    offsets.emplace_back(row, block_offsets.getU32());

  // to simplify slice size calculation, we insert a dummy offset,
  // which will be used much like end()
  offsets.emplace_back(height, raw_data.getSize());

  std::vector<PhaseOneStrip> strips(
      computeSripes(raw_data, std::move(offsets), height));

  mRaw->dim = iPoint2D(width, height);

  PhaseOneDecompressor p(mRaw, std::move(strips));
  mRaw->createData();
  p.decompress();

  if (correction_meta_data.getSize() != 0 && iiq)
    CorrectPhaseOneC(correction_meta_data, split_row, split_col);

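  // The 0x107 entry holds the white balance coefficients as three floats.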
  for (int i = 0; i < 3; i++)
    mRaw->metadata.wbCoeffs[i] = wb.getFloat();

  return mRaw;
}

void IiqDecoder::CorrectPhaseOneC(ByteStream meta_data, uint32_t split_row,
                                  uint32_t split_col) {
  meta_data.skipBytes(8);
  const uint32_t bytes_to_entries = meta_data.getU32();
  meta_data.setPosition(bytes_to_entries);
  const uint32_t entries_count = meta_data.getU32();
  meta_data.skipBytes(4);

  // This sub-stream covers all the entries, 12 bytes each.
  ByteStream entries(meta_data.getStream(entries_count, 12));
  meta_data.setPosition(0);

  bool QuadrantMultipliersSeen = false;
  bool SensorDefectsSeen = false;

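  // Each correction entry is 12 bytes: tag, length, and an offset into
  // meta_data.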
  for (uint32_t entry = 0; entry < entries_count; entry++) {
    const uint32_t tag = entries.getU32();
    const uint32_t len = entries.getU32();
    const uint32_t offset = entries.getU32();

    switch (tag) {
    case 0x400: // Sensor Defects
      if (SensorDefectsSeen)
        ThrowRDE("Second sensor defects entry seen. Unexpected.");
      correctSensorDefects(meta_data.getSubStream(offset, len));
      SensorDefectsSeen = true;
      break;
    case 0x431:
      if (QuadrantMultipliersSeen)
        ThrowRDE("Second quadrant multipliers entry seen. Unexpected.");
      if (iiq.quadrantMultipliers)
        CorrectQuadrantMultipliersCombined(meta_data.getSubStream(offset, len),
                                           split_row, split_col);
      QuadrantMultipliersSeen = true;
      break;
    default:
      break;
    }
  }
}

// This method defines a correction that compensates for the fact that
// IIQ files may come from a camera with multiple (four, in this case)
// sensors combined into a single "sensor."  Because the different
// sensors may have slightly different responses, we need to multiply
// the pixels in each by a correction factor to ensure that they blend
// together smoothly.  The correction factor is not a single
// multiplier, but a curve defined by seven control points.  Each
// curve's control points share the same seven X-coordinates.
void IiqDecoder::CorrectQuadrantMultipliersCombined(ByteStream data,
                                                    uint32_t split_row,
                                                    uint32_t split_col) {
  std::array<uint32_t, 9> shared_x_coords;

  // Read the middle seven points from the file
  std::generate_n(std::next(shared_x_coords.begin()), 7,
                  [&data] { return data.getU32(); });

  // All the curves include (0, 0) and (65535, 65535),
  // so the first and last points are predefined
  shared_x_coords.front() = 0;
  shared_x_coords.back() = 65535;

  // Check that the middle coordinates make sense.
  if (std::adjacent_find(shared_x_coords.cbegin(), shared_x_coords.cend(),
                         std::greater_equal<>()) != shared_x_coords.cend())
    ThrowRDE("The X coordinates must all be strictly increasing");

  std::array<std::array<std::vector<iPoint2D>, 2>, 2> control_points;
  for (auto& quadRow : control_points) {
    for (auto& quadrant : quadRow) {
      quadrant.reserve(9);
      quadrant.emplace_back(0, 0);

      for (int i = 1; i < 8; i++) {
        // These multipliers are expressed in ten-thousandths in the
        // file
        const uint64_t y_coord =
            (uint64_t(data.getU32()) * shared_x_coords[i]) / 10000ULL;
        if (y_coord > 65535)
          ThrowRDE("The Y coordinate %" PRIu64 " is too large", y_coord);
        quadrant.emplace_back(shared_x_coords[i], y_coord);
      }

      quadrant.emplace_back(65535, 65535);
      assert(quadrant.size() == 9);
    }
  }

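  // Apply each quadrant's curve to its region of the image; (split_row,
  // split_col) marks the boundary between the four quadrants.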
  for (int quadRow = 0; quadRow < 2; quadRow++) {
    for (int quadCol = 0; quadCol < 2; quadCol++) {
      const Array2DRef<uint16_t> img(mRaw->getU16DataAsUncroppedArray2DRef());

      const Spline<> s(control_points[quadRow][quadCol]);
      const std::vector<uint16_t> curve = s.calculateCurve();

      int row_start = quadRow == 0 ? 0 : split_row;
      int row_end = quadRow == 0 ? split_row : img.height;
      int col_start = quadCol == 0 ? 0 : split_col;
      int col_end = quadCol == 0 ? split_col : img.width;

      for (int row = row_start; row < row_end; row++) {
        for (int col = col_start; col < col_end; col++) {
          uint16_t& pixel = img(row, col);
          // This adjustment is expected to be made with the black level
          // already subtracted from the pixel values. Because the black
          // level is kept as metadata and has not been subtracted at this
          // point, we subtract the appropriate amount before indexing into
          // the curve and then add it back, so that subtracting the black
          // level later still works as expected.
          const uint16_t diff = pixel < black_level ? pixel : black_level;
          pixel = curve[pixel - diff] + diff;
        }
      }
    }
  }
}

void IiqDecoder::checkSupportInternal(const CameraMetaData* meta) {
  checkCameraSupported(meta, mRootIFD->getID(), "");

  auto id = mRootIFD->getID();
  const Camera* cam = meta->getCamera(id.make, id.model, mRaw->metadata.mode);
  if (!cam)
    ThrowRDE("Couldn't find camera %s %s", id.make.c_str(), id.model.c_str());

  mRaw->cfa = cam->cfa;
}

void IiqDecoder::decodeMetaDataInternal(const CameraMetaData* meta) {
  setMetaData(meta, "", 0);

  if (black_level)
    mRaw->blackLevel = black_level;
}

void IiqDecoder::correctSensorDefects(ByteStream data) {
  while (data.getRemainSize() != 0) {
    const uint16_t col = data.getU16();
    const uint16_t row = data.getU16();
    const uint16_t type = data.getU16();
    data.skipBytes(2); // Ignore unknown/unused bits.

    if (col >= mRaw->dim.x) // Value for col is outside the raw image.
      continue;
    switch (type) {
    case 131: // bad column
    case 137: // bad column
      correctBadColumn(col);
      break;
    case 129: // bad pixel
      handleBadPixel(col, row);
      break;
    default: // Oooh, a sensor defect not in dcraw!
      break;
    }
  }
}

void IiqDecoder::handleBadPixel(const uint16_t col, const uint16_t row) {
  MutexLocker guard(&mRaw->mBadPixelMutex);
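  // Pack the defect coordinates as (row << 16) + col and append them to the
  // shared bad pixel list.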
  mRaw->mBadPixelPositions.insert(mRaw->mBadPixelPositions.end(),
                                  (static_cast<uint32_t>(row) << 16) + col);
}

void IiqDecoder::correctBadColumn(const uint16_t col) {
  const Array2DRef<uint16_t> img(mRaw->getU16DataAsUncroppedArray2DRef());

  for (int row = 2; row < mRaw->dim.y - 2; row++) {
    if (mRaw->cfa.getColorAt(col, row) == CFA_GREEN) {
      /* Do green pixels. Let's pretend we are in the "G" pixel, in the middle:
       *   G=G
       *   BGB
       *   G0G
       * We accumulate the values of the 4 "G" pixels from the diagonals, then
       * check which of the 4 values is most distant from the mean of those 4
       * values, subtract it from the sum, average (divide by 3), and round to
       * the nearest int.
       */
      int max = 0;
      std::array<uint16_t, 4> val;
      std::array<int32_t, 4> dev;
      int32_t sum = 0;
      sum += val[0] = img(row - 1, col - 1);
      sum += val[1] = img(row + 1, col - 1);
      sum += val[2] = img(row - 1, col + 1);
      sum += val[3] = img(row + 1, col + 1);
      for (int i = 0; i < 4; i++) {
        dev[i] = std::abs((val[i] * 4) - sum);
        if (dev[max] < dev[i])
          max = i;
      }
      const int three_pixels = sum - val[max];
      // This is `std::lround(three_pixels / 3.0)`, but without FP.
      img(row, col) = (three_pixels + 1) / 3;
    } else {
      /*
       * Do non-green pixels. Let's pretend we are in the "R" pixel, in the
       * middle:
       *   RG=GR
       *   GB=BG
       *   RGRGR
       *   GB0BG
       *   RG0GR
       * We have 6 other "R" pixels: 2 horizontal and 4 diagonal.
       * We combine them to get the value of the pixel we are in.
       */
      uint32_t diags = img(row + 2, col - 2) + img(row - 2, col - 2) +
                       img(row + 2, col + 2) + img(row - 2, col + 2);
      uint32_t horiz = img(row, col - 2) + img(row, col + 2);
      // But this is not just averaging; we bias towards the horizontal pixels.
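      // 0.0732233 ~= (1 - 1/sqrt(2)) / 4 and 0.3535534 ~= 1 / (2 * sqrt(2));
      // the six weights sum to 1.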
      img(row, col) = std::lround(diags * 0.0732233 + horiz * 0.3535534);
    }
  }
}

} // namespace rawspeed