// Copyright 2019 Joe Drago. All rights reserved.
// SPDX-License-Identifier: BSD-2-Clause

#include "avif/internal.h"

#include <assert.h>
#include <math.h>
#include <string.h>

struct YUVBlock
{
    float y;
    float u;
    float v;
};

static avifBool avifPrepareReformatState(const avifImage * image, const avifRGBImage * rgb, avifReformatState * state)
{
    if ((image->depth != 8) && (image->depth != 10) && (image->depth != 12)) {
        return AVIF_FALSE;
    }
    if ((rgb->depth != 8) && (rgb->depth != 10) && (rgb->depth != 12) && (rgb->depth != 16)) {
        return AVIF_FALSE;
    }

    // These matrixCoefficients values are currently unsupported. Revise this list as more support is added.
    //
    // YCgCo performs limited-full range adjustment on R,G,B, but the current implementation performs range adjustment
    // on Y,U,V, so YCgCo with limited range is unsupported.
    if ((image->matrixCoefficients == 3 /* CICP reserved */) ||
        ((image->matrixCoefficients == AVIF_MATRIX_COEFFICIENTS_YCGCO) && (image->yuvRange == AVIF_RANGE_LIMITED)) ||
        (image->matrixCoefficients == AVIF_MATRIX_COEFFICIENTS_BT2020_CL) ||
        (image->matrixCoefficients == AVIF_MATRIX_COEFFICIENTS_SMPTE2085) ||
        (image->matrixCoefficients == AVIF_MATRIX_COEFFICIENTS_CHROMA_DERIVED_CL) ||
        (image->matrixCoefficients >= AVIF_MATRIX_COEFFICIENTS_ICTCP)) { // Note the >= catching "future" CICP values here too
        return AVIF_FALSE;
    }

    if ((image->matrixCoefficients == AVIF_MATRIX_COEFFICIENTS_IDENTITY) && (image->yuvFormat != AVIF_PIXEL_FORMAT_YUV444)) {
        return AVIF_FALSE;
    }

    if (image->yuvFormat == AVIF_PIXEL_FORMAT_NONE) {
        return AVIF_FALSE;
    }

    avifGetPixelFormatInfo(image->yuvFormat, &state->formatInfo);
    avifCalcYUVCoefficients(image, &state->kr, &state->kg, &state->kb);
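    // kr/kg/kb are the luma weights used by the YUV-coefficients conversion paths below; the
    // identity and YCgCo modes do not use them (they are zeroed just below).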
    state->mode = AVIF_REFORMAT_MODE_YUV_COEFFICIENTS;

    if (image->matrixCoefficients == AVIF_MATRIX_COEFFICIENTS_IDENTITY) {
        state->mode = AVIF_REFORMAT_MODE_IDENTITY;
    } else if (image->matrixCoefficients == AVIF_MATRIX_COEFFICIENTS_YCGCO) {
        state->mode = AVIF_REFORMAT_MODE_YCGCO;
    }

    if (state->mode != AVIF_REFORMAT_MODE_YUV_COEFFICIENTS) {
        state->kr = 0.0f;
        state->kg = 0.0f;
        state->kb = 0.0f;
    }

    state->yuvChannelBytes = (image->depth > 8) ? 2 : 1;
    state->rgbChannelBytes = (rgb->depth > 8) ? 2 : 1;
    state->rgbChannelCount = avifRGBFormatChannelCount(rgb->format);
    state->rgbPixelBytes = state->rgbChannelBytes * state->rgbChannelCount;

    switch (rgb->format) {
        case AVIF_RGB_FORMAT_RGB:
            state->rgbOffsetBytesR = state->rgbChannelBytes * 0;
            state->rgbOffsetBytesG = state->rgbChannelBytes * 1;
            state->rgbOffsetBytesB = state->rgbChannelBytes * 2;
            state->rgbOffsetBytesA = 0;
            break;
        case AVIF_RGB_FORMAT_RGBA:
            state->rgbOffsetBytesR = state->rgbChannelBytes * 0;
            state->rgbOffsetBytesG = state->rgbChannelBytes * 1;
            state->rgbOffsetBytesB = state->rgbChannelBytes * 2;
            state->rgbOffsetBytesA = state->rgbChannelBytes * 3;
            break;
        case AVIF_RGB_FORMAT_ARGB:
            state->rgbOffsetBytesA = state->rgbChannelBytes * 0;
            state->rgbOffsetBytesR = state->rgbChannelBytes * 1;
            state->rgbOffsetBytesG = state->rgbChannelBytes * 2;
            state->rgbOffsetBytesB = state->rgbChannelBytes * 3;
            break;
        case AVIF_RGB_FORMAT_BGR:
            state->rgbOffsetBytesB = state->rgbChannelBytes * 0;
            state->rgbOffsetBytesG = state->rgbChannelBytes * 1;
            state->rgbOffsetBytesR = state->rgbChannelBytes * 2;
            state->rgbOffsetBytesA = 0;
            break;
        case AVIF_RGB_FORMAT_BGRA:
            state->rgbOffsetBytesB = state->rgbChannelBytes * 0;
            state->rgbOffsetBytesG = state->rgbChannelBytes * 1;
            state->rgbOffsetBytesR = state->rgbChannelBytes * 2;
            state->rgbOffsetBytesA = state->rgbChannelBytes * 3;
            break;
        case AVIF_RGB_FORMAT_ABGR:
            state->rgbOffsetBytesA = state->rgbChannelBytes * 0;
            state->rgbOffsetBytesB = state->rgbChannelBytes * 1;
            state->rgbOffsetBytesG = state->rgbChannelBytes * 2;
            state->rgbOffsetBytesR = state->rgbChannelBytes * 3;
            break;

        default:
            return AVIF_FALSE;
    }

    state->yuvDepth = image->depth;
    state->yuvRange = image->yuvRange;
    state->yuvMaxChannel = (1 << image->depth) - 1;
    state->rgbMaxChannel = (1 << rgb->depth) - 1;
    state->rgbMaxChannelF = (float)state->rgbMaxChannel;
    state->biasY = (state->yuvRange == AVIF_RANGE_LIMITED) ? (float)(16 << (state->yuvDepth - 8)) : 0.0f;
    state->biasUV = (float)(1 << (state->yuvDepth - 1));
    state->biasA = (image->alphaRange == AVIF_RANGE_LIMITED) ? (float)(16 << (state->yuvDepth - 8)) : 0.0f;
    state->rangeY = (float)((state->yuvRange == AVIF_RANGE_LIMITED) ? (219 << (state->yuvDepth - 8)) : state->yuvMaxChannel);
    state->rangeUV = (float)((state->yuvRange == AVIF_RANGE_LIMITED) ? (224 << (state->yuvDepth - 8)) : state->yuvMaxChannel);
    state->rangeA = (float)((image->alphaRange == AVIF_RANGE_LIMITED) ? (219 << (state->yuvDepth - 8)) : state->yuvMaxChannel);
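    // For limited (video) range, the bias/range values above follow the 8-bit nominal ranges: Y spans
    // 16..235 (bias 16, range 219) and U/V span 16..240 centered on 128 (bias 128, range 224), with
    // everything scaled up by (depth - 8) bits at higher depths. Full range uses the entire
    // [0, (1 << depth) - 1] code range for Y, with U/V still biased around the mid code point.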

    uint32_t cpCount = 1 << image->depth;
    if (state->mode == AVIF_REFORMAT_MODE_IDENTITY) {
        for (uint32_t cp = 0; cp < cpCount; ++cp) {
            state->unormFloatTableY[cp] = ((float)cp - state->biasY) / state->rangeY;
            state->unormFloatTableUV[cp] = ((float)cp - state->biasY) / state->rangeY;
        }
    } else {
        for (uint32_t cp = 0; cp < cpCount; ++cp) {
            // Review this when implementing YCgCo limited range support.
            state->unormFloatTableY[cp] = ((float)cp - state->biasY) / state->rangeY;
            state->unormFloatTableUV[cp] = ((float)cp - state->biasUV) / state->rangeUV;
        }
    }

    state->toRGBAlphaMode = AVIF_ALPHA_MULTIPLY_MODE_NO_OP;
    if (image->alphaPlane) {
        if (!avifRGBFormatHasAlpha(rgb->format) || rgb->ignoreAlpha) {
            // If we are converting an image with alpha into a format without alpha, premultiply before discarding the
            // alpha plane. This has the same effect as rendering the image over a black background, which makes sense.
            if (!image->alphaPremultiplied) {
                state->toRGBAlphaMode = AVIF_ALPHA_MULTIPLY_MODE_MULTIPLY;
            }
        } else {
            if (!image->alphaPremultiplied && rgb->alphaPremultiplied) {
                state->toRGBAlphaMode = AVIF_ALPHA_MULTIPLY_MODE_MULTIPLY;
            } else if (image->alphaPremultiplied && !rgb->alphaPremultiplied) {
                state->toRGBAlphaMode = AVIF_ALPHA_MULTIPLY_MODE_UNMULTIPLY;
            }
        }
    }

    return AVIF_TRUE;
}

// Formulas 20-31 from https://www.itu.int/rec/T-REC-H.273-201612-I/en
static int avifReformatStateYToUNorm(avifReformatState * state, float v)
{
    int unorm = (int)avifRoundf(v * state->rangeY + state->biasY);
    return AVIF_CLAMP(unorm, 0, state->yuvMaxChannel);
}
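// For example (assuming 8-bit limited range): Y = 0.0f maps to round(0 * 219 + 16) = 16, and
// Y = 1.0f maps to round(219 + 16) = 235, matching the nominal limited-range luma extremes.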

static int avifReformatStateUVToUNorm(avifReformatState * state, float v)
{
    int unorm;

    // YCgCo performs limited-full range adjustment on R,G,B but the current implementation performs range adjustment
    // on Y,U,V. So YCgCo with limited range is unsupported.
    assert((state->mode != AVIF_REFORMAT_MODE_YCGCO) || (state->yuvRange == AVIF_RANGE_FULL));

    if (state->mode == AVIF_REFORMAT_MODE_IDENTITY) {
        unorm = (int)avifRoundf(v * state->rangeY + state->biasY);
    } else {
        unorm = (int)avifRoundf(v * state->rangeUV + state->biasUV);
    }

    return AVIF_CLAMP(unorm, 0, state->yuvMaxChannel);
}
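// For example (assuming 8-bit limited range, non-identity): Cb = 0.0f maps to the mid code point 128,
// and Cb = 0.5f maps to round(0.5 * 224 + 128) = 240, the top of the nominal chroma range.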

avifResult avifImageRGBToYUV(avifImage * image, const avifRGBImage * rgb)
{
    if (!rgb->pixels) {
        return AVIF_RESULT_REFORMAT_FAILED;
    }

    avifReformatState state;
    if (!avifPrepareReformatState(image, rgb, &state)) {
        return AVIF_RESULT_REFORMAT_FAILED;
    }

    avifAlphaMultiplyMode alphaMode = AVIF_ALPHA_MULTIPLY_MODE_NO_OP;
    avifImageAllocatePlanes(image, AVIF_PLANES_YUV);
    if (avifRGBFormatHasAlpha(rgb->format) && !rgb->ignoreAlpha) {
        avifImageAllocatePlanes(image, AVIF_PLANES_A);
        if (!rgb->alphaPremultiplied && image->alphaPremultiplied) {
            alphaMode = AVIF_ALPHA_MULTIPLY_MODE_MULTIPLY;
        } else if (rgb->alphaPremultiplied && !image->alphaPremultiplied) {
            alphaMode = AVIF_ALPHA_MULTIPLY_MODE_UNMULTIPLY;
        }
    }

    const float kr = state.kr;
    const float kg = state.kg;
    const float kb = state.kb;

    struct YUVBlock yuvBlock[2][2];
    float rgbPixel[3];
    const float rgbMaxChannelF = state.rgbMaxChannelF;
    uint8_t ** yuvPlanes = image->yuvPlanes;
    uint32_t * yuvRowBytes = image->yuvRowBytes;
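    // Process the image in 2x2 blocks so that, for subsampled formats (YUV420/YUV422), the chroma
    // sample(s) can be averaged from the full-resolution U/V values computed within the same block.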
    for (uint32_t outerJ = 0; outerJ < image->height; outerJ += 2) {
        for (uint32_t outerI = 0; outerI < image->width; outerI += 2) {
            int blockW = 2, blockH = 2;
            if ((outerI + 1) >= image->width) {
                blockW = 1;
            }
            if ((outerJ + 1) >= image->height) {
                blockH = 1;
            }

            // Convert an entire 2x2 block to YUV, and populate any fully sampled channels as we go
            for (int bJ = 0; bJ < blockH; ++bJ) {
                for (int bI = 0; bI < blockW; ++bI) {
                    int i = outerI + bI;
                    int j = outerJ + bJ;

                    // Unpack RGB into normalized float
                    if (state.rgbChannelBytes > 1) {
                        rgbPixel[0] =
                            *((uint16_t *)(&rgb->pixels[state.rgbOffsetBytesR + (i * state.rgbPixelBytes) + (j * rgb->rowBytes)])) /
                            rgbMaxChannelF;
                        rgbPixel[1] =
                            *((uint16_t *)(&rgb->pixels[state.rgbOffsetBytesG + (i * state.rgbPixelBytes) + (j * rgb->rowBytes)])) /
                            rgbMaxChannelF;
                        rgbPixel[2] =
                            *((uint16_t *)(&rgb->pixels[state.rgbOffsetBytesB + (i * state.rgbPixelBytes) + (j * rgb->rowBytes)])) /
                            rgbMaxChannelF;
                    } else {
                        rgbPixel[0] = rgb->pixels[state.rgbOffsetBytesR + (i * state.rgbPixelBytes) + (j * rgb->rowBytes)] / rgbMaxChannelF;
                        rgbPixel[1] = rgb->pixels[state.rgbOffsetBytesG + (i * state.rgbPixelBytes) + (j * rgb->rowBytes)] / rgbMaxChannelF;
                        rgbPixel[2] = rgb->pixels[state.rgbOffsetBytesB + (i * state.rgbPixelBytes) + (j * rgb->rowBytes)] / rgbMaxChannelF;
                    }

                    if (alphaMode != AVIF_ALPHA_MULTIPLY_MODE_NO_OP) {
                        float a;
                        if (state.rgbChannelBytes > 1) {
                            a = *((uint16_t *)(&rgb->pixels[state.rgbOffsetBytesA + (i * state.rgbPixelBytes) + (j * rgb->rowBytes)])) /
                                rgbMaxChannelF;
                        } else {
                            a = rgb->pixels[state.rgbOffsetBytesA + (i * state.rgbPixelBytes) + (j * rgb->rowBytes)] / rgbMaxChannelF;
                        }

                        if (alphaMode == AVIF_ALPHA_MULTIPLY_MODE_MULTIPLY) {
                            if (a == 0) {
                                rgbPixel[0] = 0;
                                rgbPixel[1] = 0;
                                rgbPixel[2] = 0;
                            } else if (a < 1.0f) {
                                rgbPixel[0] *= a;
                                rgbPixel[1] *= a;
                                rgbPixel[2] *= a;
                            }
                        } else {
                            // alphaMode == AVIF_ALPHA_MULTIPLY_MODE_UNMULTIPLY
                            if (a == 0) {
                                rgbPixel[0] = 0;
                                rgbPixel[1] = 0;
                                rgbPixel[2] = 0;
                            } else if (a < 1.0f) {
                                rgbPixel[0] /= a;
                                rgbPixel[1] /= a;
                                rgbPixel[2] /= a;
                                rgbPixel[0] = AVIF_MIN(rgbPixel[0], 1.0f);
                                rgbPixel[1] = AVIF_MIN(rgbPixel[1], 1.0f);
                                rgbPixel[2] = AVIF_MIN(rgbPixel[2], 1.0f);
                            }
                        }
                    }

                    // RGB -> YUV conversion
                    if (state.mode == AVIF_REFORMAT_MODE_IDENTITY) {
                        // Formulas 41,42,43 from https://www.itu.int/rec/T-REC-H.273-201612-I/en
                        yuvBlock[bI][bJ].y = rgbPixel[1]; // G
                        yuvBlock[bI][bJ].u = rgbPixel[2]; // B
                        yuvBlock[bI][bJ].v = rgbPixel[0]; // R
                    } else if (state.mode == AVIF_REFORMAT_MODE_YCGCO) {
                        // Formulas 44,45,46 from https://www.itu.int/rec/T-REC-H.273-201612-I/en
                        yuvBlock[bI][bJ].y = 0.5f * rgbPixel[1] + 0.25f * (rgbPixel[0] + rgbPixel[2]);
                        yuvBlock[bI][bJ].u = 0.5f * rgbPixel[1] - 0.25f * (rgbPixel[0] + rgbPixel[2]);
                        yuvBlock[bI][bJ].v = 0.5f * (rgbPixel[0] - rgbPixel[2]);
                    } else {
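                        // Standard non-constant-luminance YCbCr: Y is the kr/kg/kb-weighted sum of R,G,B,
                        // and Cb/Cr are the scaled color differences (B - Y) and (R - Y).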
                        float Y = (kr * rgbPixel[0]) + (kg * rgbPixel[1]) + (kb * rgbPixel[2]);
                        yuvBlock[bI][bJ].y = Y;
                        yuvBlock[bI][bJ].u = (rgbPixel[2] - Y) / (2 * (1 - kb));
                        yuvBlock[bI][bJ].v = (rgbPixel[0] - Y) / (2 * (1 - kr));
                    }

                    if (state.yuvChannelBytes > 1) {
                        uint16_t * pY = (uint16_t *)&yuvPlanes[AVIF_CHAN_Y][(i * 2) + (j * yuvRowBytes[AVIF_CHAN_Y])];
                        *pY = (uint16_t)avifReformatStateYToUNorm(&state, yuvBlock[bI][bJ].y);
                        if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV444) {
                            // YUV444, full chroma
                            uint16_t * pU = (uint16_t *)&yuvPlanes[AVIF_CHAN_U][(i * 2) + (j * yuvRowBytes[AVIF_CHAN_U])];
                            *pU = (uint16_t)avifReformatStateUVToUNorm(&state, yuvBlock[bI][bJ].u);
                            uint16_t * pV = (uint16_t *)&yuvPlanes[AVIF_CHAN_V][(i * 2) + (j * yuvRowBytes[AVIF_CHAN_V])];
                            *pV = (uint16_t)avifReformatStateUVToUNorm(&state, yuvBlock[bI][bJ].v);
                        }
                    } else {
                        yuvPlanes[AVIF_CHAN_Y][i + (j * yuvRowBytes[AVIF_CHAN_Y])] =
                            (uint8_t)avifReformatStateYToUNorm(&state, yuvBlock[bI][bJ].y);
                        if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV444) {
                            // YUV444, full chroma
                            yuvPlanes[AVIF_CHAN_U][i + (j * yuvRowBytes[AVIF_CHAN_U])] =
                                (uint8_t)avifReformatStateUVToUNorm(&state, yuvBlock[bI][bJ].u);
                            yuvPlanes[AVIF_CHAN_V][i + (j * yuvRowBytes[AVIF_CHAN_V])] =
                                (uint8_t)avifReformatStateUVToUNorm(&state, yuvBlock[bI][bJ].v);
                        }
                    }
                }
            }

            // Populate any subsampled channels with averages from the 2x2 block
            if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV420) {
                // YUV420, average 4 samples (2x2)

                float sumU = 0.0f;
                float sumV = 0.0f;
                for (int bJ = 0; bJ < blockH; ++bJ) {
                    for (int bI = 0; bI < blockW; ++bI) {
                        sumU += yuvBlock[bI][bJ].u;
                        sumV += yuvBlock[bI][bJ].v;
                    }
                }
                float totalSamples = (float)(blockW * blockH);
                float avgU = sumU / totalSamples;
                float avgV = sumV / totalSamples;

                const int chromaShiftX = 1;
                const int chromaShiftY = 1;
                int uvI = outerI >> chromaShiftX;
                int uvJ = outerJ >> chromaShiftY;
                if (state.yuvChannelBytes > 1) {
                    uint16_t * pU = (uint16_t *)&yuvPlanes[AVIF_CHAN_U][(uvI * 2) + (uvJ * yuvRowBytes[AVIF_CHAN_U])];
                    *pU = (uint16_t)avifReformatStateUVToUNorm(&state, avgU);
                    uint16_t * pV = (uint16_t *)&yuvPlanes[AVIF_CHAN_V][(uvI * 2) + (uvJ * yuvRowBytes[AVIF_CHAN_V])];
                    *pV = (uint16_t)avifReformatStateUVToUNorm(&state, avgV);
                } else {
                    yuvPlanes[AVIF_CHAN_U][uvI + (uvJ * yuvRowBytes[AVIF_CHAN_U])] = (uint8_t)avifReformatStateUVToUNorm(&state, avgU);
                    yuvPlanes[AVIF_CHAN_V][uvI + (uvJ * yuvRowBytes[AVIF_CHAN_V])] = (uint8_t)avifReformatStateUVToUNorm(&state, avgV);
                }
            } else if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV422) {
                // YUV422, average 2 samples (1x2), twice

                for (int bJ = 0; bJ < blockH; ++bJ) {
                    float sumU = 0.0f;
                    float sumV = 0.0f;
                    for (int bI = 0; bI < blockW; ++bI) {
                        sumU += yuvBlock[bI][bJ].u;
                        sumV += yuvBlock[bI][bJ].v;
                    }
                    float totalSamples = (float)blockW;
                    float avgU = sumU / totalSamples;
                    float avgV = sumV / totalSamples;

                    const int chromaShiftX = 1;
                    int uvI = outerI >> chromaShiftX;
                    int uvJ = outerJ + bJ;
                    if (state.yuvChannelBytes > 1) {
                        uint16_t * pU = (uint16_t *)&yuvPlanes[AVIF_CHAN_U][(uvI * 2) + (uvJ * yuvRowBytes[AVIF_CHAN_U])];
                        *pU = (uint16_t)avifReformatStateUVToUNorm(&state, avgU);
                        uint16_t * pV = (uint16_t *)&yuvPlanes[AVIF_CHAN_V][(uvI * 2) + (uvJ * yuvRowBytes[AVIF_CHAN_V])];
                        *pV = (uint16_t)avifReformatStateUVToUNorm(&state, avgV);
                    } else {
                        yuvPlanes[AVIF_CHAN_U][uvI + (uvJ * yuvRowBytes[AVIF_CHAN_U])] =
                            (uint8_t)avifReformatStateUVToUNorm(&state, avgU);
                        yuvPlanes[AVIF_CHAN_V][uvI + (uvJ * yuvRowBytes[AVIF_CHAN_V])] =
                            (uint8_t)avifReformatStateUVToUNorm(&state, avgV);
                    }
                }
            }
        }
    }

    if (image->alphaPlane && image->alphaRowBytes) {
        avifAlphaParams params;

        params.width = image->width;
        params.height = image->height;
        params.dstDepth = image->depth;
        params.dstRange = image->alphaRange;
        params.dstPlane = image->alphaPlane;
        params.dstRowBytes = image->alphaRowBytes;
        params.dstOffsetBytes = 0;
        params.dstPixelBytes = state.yuvChannelBytes;

        if (avifRGBFormatHasAlpha(rgb->format) && !rgb->ignoreAlpha) {
            params.srcDepth = rgb->depth;
            params.srcRange = AVIF_RANGE_FULL;
            params.srcPlane = rgb->pixels;
            params.srcRowBytes = rgb->rowBytes;
            params.srcOffsetBytes = state.rgbOffsetBytesA;
            params.srcPixelBytes = state.rgbPixelBytes;

            avifReformatAlpha(&params);
        } else {
            avifFillAlpha(&params);
        }
    }
    return AVIF_RESULT_OK;
}

// Note: This function handles alpha (un)multiply.
static avifResult avifImageYUVAnyToRGBAnySlow(const avifImage * image,
                                              avifRGBImage * rgb,
                                              avifReformatState * state,
                                              const avifChromaUpsampling chromaUpsampling)
{
    // Aliases for some state
    const float kr = state->kr;
    const float kg = state->kg;
    const float kb = state->kb;
    const float * const unormFloatTableY = state->unormFloatTableY;
    const float * const unormFloatTableUV = state->unormFloatTableUV;
    const uint32_t yuvChannelBytes = state->yuvChannelBytes;
    const uint32_t rgbPixelBytes = state->rgbPixelBytes;

    // Aliases for plane data
    const uint8_t * yPlane = image->yuvPlanes[AVIF_CHAN_Y];
    const uint8_t * uPlane = image->yuvPlanes[AVIF_CHAN_U];
    const uint8_t * vPlane = image->yuvPlanes[AVIF_CHAN_V];
    const uint8_t * aPlane = image->alphaPlane;
    const uint32_t yRowBytes = image->yuvRowBytes[AVIF_CHAN_Y];
    const uint32_t uRowBytes = image->yuvRowBytes[AVIF_CHAN_U];
    const uint32_t vRowBytes = image->yuvRowBytes[AVIF_CHAN_V];
    const uint32_t aRowBytes = image->alphaRowBytes;

    // Various observations and limits
    const avifBool hasColor = (uPlane && vPlane && (image->yuvFormat != AVIF_PIXEL_FORMAT_YUV400));
    const uint16_t yuvMaxChannel = (uint16_t)state->yuvMaxChannel;
    const float rgbMaxChannelF = state->rgbMaxChannelF;

    // These are the only supported built-ins
    assert((chromaUpsampling == AVIF_CHROMA_UPSAMPLING_BILINEAR) || (chromaUpsampling == AVIF_CHROMA_UPSAMPLING_NEAREST));

    // If toRGBAlphaMode is active (not no-op), assert that the alpha plane is present. The end of
    // the avifPrepareReformatState() function should ensure this, but this assert makes it clear
    // to clang's analyzer.
    assert((state->toRGBAlphaMode == AVIF_ALPHA_MULTIPLY_MODE_NO_OP) || aPlane);

    for (uint32_t j = 0; j < image->height; ++j) {
        const uint32_t uvJ = j >> state->formatInfo.chromaShiftY;
        const uint8_t * ptrY8 = &yPlane[j * yRowBytes];
        const uint8_t * ptrU8 = uPlane ? &uPlane[(uvJ * uRowBytes)] : NULL;
        const uint8_t * ptrV8 = vPlane ? &vPlane[(uvJ * vRowBytes)] : NULL;
        const uint8_t * ptrA8 = aPlane ? &aPlane[j * aRowBytes] : NULL;
        const uint16_t * ptrY16 = (const uint16_t *)ptrY8;
        const uint16_t * ptrU16 = (const uint16_t *)ptrU8;
        const uint16_t * ptrV16 = (const uint16_t *)ptrV8;
        const uint16_t * ptrA16 = (const uint16_t *)ptrA8;

        uint8_t * ptrR = &rgb->pixels[state->rgbOffsetBytesR + (j * rgb->rowBytes)];
        uint8_t * ptrG = &rgb->pixels[state->rgbOffsetBytesG + (j * rgb->rowBytes)];
        uint8_t * ptrB = &rgb->pixels[state->rgbOffsetBytesB + (j * rgb->rowBytes)];

        for (uint32_t i = 0; i < image->width; ++i) {
            uint32_t uvI = i >> state->formatInfo.chromaShiftX;
            float Y, Cb = 0.5f, Cr = 0.5f;

            // Calculate Y
            uint16_t unormY;
            if (image->depth == 8) {
                unormY = ptrY8[i];
            } else {
                // clamp incoming data to protect against bad LUT lookups
                unormY = AVIF_MIN(ptrY16[i], yuvMaxChannel);
            }
            Y = unormFloatTableY[unormY];

            // Calculate Cb and Cr
            if (hasColor) {
                if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV444) {
                    uint16_t unormU, unormV;

                    if (image->depth == 8) {
                        unormU = ptrU8[uvI];
                        unormV = ptrV8[uvI];
                    } else {
                        // clamp incoming data to protect against bad LUT lookups
                        unormU = AVIF_MIN(ptrU16[uvI], yuvMaxChannel);
                        unormV = AVIF_MIN(ptrV16[uvI], yuvMaxChannel);
                    }

                    Cb = unormFloatTableUV[unormU];
                    Cr = unormFloatTableUV[unormV];
                } else {
                    // Upsample to 444:
                    //
                    // *   *   *   *
                    //   A       B
                    // *   1   2   *
                    //
                    // *   3   4   *
                    //   C       D
                    // *   *   *   *
                    //
                    // When converting from YUV420 to RGB, for any given "high-resolution" RGB
                    // coordinate (1,2,3,4,*), there are up to four "low-resolution" UV samples
                    // (A,B,C,D) that are "nearest" to the pixel. For RGB pixel #1, A is the closest
                    // UV sample, B and C are "adjacent" to it on the same row and column, and D is
                    // the diagonal. For RGB pixel 3, C is the closest UV sample, A and D are
                    // adjacent, and B is the diagonal. Sometimes the adjacent pixel on the same row
                    // is to the left or right, and sometimes the adjacent pixel on the same column
                    // is up or down. For any edge or corner, there might be only one or two
                    // samples nearby, so they'll be duplicated.
                    //
                    // The following code attempts to find all four nearest UV samples and place them
                    // in the unormU and unormV grids as follows:
                    //
                    // unorm[0][0] = closest         ( weights: bilinear: 9/16, nearest: 1 )
                    // unorm[1][0] = adjacent col    ( weights: bilinear: 3/16, nearest: 0 )
                    // unorm[0][1] = adjacent row    ( weights: bilinear: 3/16, nearest: 0 )
                    // unorm[1][1] = diagonal        ( weights: bilinear: 1/16, nearest: 0 )
                    //
                    // It then weights them according to the requested upsampling set in avifRGBImage.
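                    //
                    // For example (using the diagram above), the bilinear chroma for RGB pixel #1 is
                    // approximately (9/16)*A + (3/16)*B + (3/16)*C + (1/16)*D.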

                    uint16_t unormU[2][2], unormV[2][2];

                    // How many bytes to add to a uint8_t pointer index to get to the adjacent (lesser) sample in a given direction
                    int uAdjCol, vAdjCol, uAdjRow, vAdjRow;
                    if ((i == 0) || ((i == (image->width - 1)) && ((i % 2) != 0))) {
                        uAdjCol = 0;
                        vAdjCol = 0;
                    } else {
                        if ((i % 2) != 0) {
                            uAdjCol = yuvChannelBytes;
                            vAdjCol = yuvChannelBytes;
                        } else {
                            uAdjCol = -1 * yuvChannelBytes;
                            vAdjCol = -1 * yuvChannelBytes;
                        }
                    }

                    // For YUV422, uvJ will always be a fresh value (always corresponds to j), so
                    // we'll simply duplicate the sample as if we were on the top or bottom row and
                    // it'll behave as plain old linear (1D) upsampling, which is all we want.
                    if ((j == 0) || ((j == (image->height - 1)) && ((j % 2) != 0)) || (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV422)) {
                        uAdjRow = 0;
                        vAdjRow = 0;
                    } else {
                        if ((j % 2) != 0) {
                            uAdjRow = (int)uRowBytes;
                            vAdjRow = (int)vRowBytes;
                        } else {
                            uAdjRow = -1 * (int)uRowBytes;
                            vAdjRow = -1 * (int)vRowBytes;
                        }
                    }

                    if (image->depth == 8) {
                        unormU[0][0] = uPlane[(uvJ * uRowBytes) + (uvI * yuvChannelBytes)];
                        unormV[0][0] = vPlane[(uvJ * vRowBytes) + (uvI * yuvChannelBytes)];
                        unormU[1][0] = uPlane[(uvJ * uRowBytes) + (uvI * yuvChannelBytes) + uAdjCol];
                        unormV[1][0] = vPlane[(uvJ * vRowBytes) + (uvI * yuvChannelBytes) + vAdjCol];
                        unormU[0][1] = uPlane[(uvJ * uRowBytes) + (uvI * yuvChannelBytes) + uAdjRow];
                        unormV[0][1] = vPlane[(uvJ * vRowBytes) + (uvI * yuvChannelBytes) + vAdjRow];
                        unormU[1][1] = uPlane[(uvJ * uRowBytes) + (uvI * yuvChannelBytes) + uAdjCol + uAdjRow];
                        unormV[1][1] = vPlane[(uvJ * vRowBytes) + (uvI * yuvChannelBytes) + vAdjCol + vAdjRow];
                    } else {
                        unormU[0][0] = *((const uint16_t *)&uPlane[(uvJ * uRowBytes) + (uvI * yuvChannelBytes)]);
                        unormV[0][0] = *((const uint16_t *)&vPlane[(uvJ * vRowBytes) + (uvI * yuvChannelBytes)]);
                        unormU[1][0] = *((const uint16_t *)&uPlane[(uvJ * uRowBytes) + (uvI * yuvChannelBytes) + uAdjCol]);
                        unormV[1][0] = *((const uint16_t *)&vPlane[(uvJ * vRowBytes) + (uvI * yuvChannelBytes) + vAdjCol]);
                        unormU[0][1] = *((const uint16_t *)&uPlane[(uvJ * uRowBytes) + (uvI * yuvChannelBytes) + uAdjRow]);
                        unormV[0][1] = *((const uint16_t *)&vPlane[(uvJ * vRowBytes) + (uvI * yuvChannelBytes) + vAdjRow]);
                        unormU[1][1] = *((const uint16_t *)&uPlane[(uvJ * uRowBytes) + (uvI * yuvChannelBytes) + uAdjCol + uAdjRow]);
                        unormV[1][1] = *((const uint16_t *)&vPlane[(uvJ * vRowBytes) + (uvI * yuvChannelBytes) + vAdjCol + vAdjRow]);

                        // clamp incoming data to protect against bad LUT lookups
                        for (int bJ = 0; bJ < 2; ++bJ) {
                            for (int bI = 0; bI < 2; ++bI) {
                                unormU[bI][bJ] = AVIF_MIN(unormU[bI][bJ], yuvMaxChannel);
                                unormV[bI][bJ] = AVIF_MIN(unormV[bI][bJ], yuvMaxChannel);
                            }
                        }
                    }

                    if (chromaUpsampling == AVIF_CHROMA_UPSAMPLING_BILINEAR) {
                        // Bilinear filtering with weights
                        Cb = (unormFloatTableUV[unormU[0][0]] * (9.0f / 16.0f)) + (unormFloatTableUV[unormU[1][0]] * (3.0f / 16.0f)) +
                             (unormFloatTableUV[unormU[0][1]] * (3.0f / 16.0f)) + (unormFloatTableUV[unormU[1][1]] * (1.0f / 16.0f));
                        Cr = (unormFloatTableUV[unormV[0][0]] * (9.0f / 16.0f)) + (unormFloatTableUV[unormV[1][0]] * (3.0f / 16.0f)) +
                             (unormFloatTableUV[unormV[0][1]] * (3.0f / 16.0f)) + (unormFloatTableUV[unormV[1][1]] * (1.0f / 16.0f));
                    } else {
                        assert(chromaUpsampling == AVIF_CHROMA_UPSAMPLING_NEAREST);

                        // Nearest neighbor; ignore all UVs but the closest one
                        Cb = unormFloatTableUV[unormU[0][0]];
                        Cr = unormFloatTableUV[unormV[0][0]];
                    }
                }
            }

            float R, G, B;
            if (hasColor) {
                if (state->mode == AVIF_REFORMAT_MODE_IDENTITY) {
                    // Identity (GBR): Formulas 41,42,43 from https://www.itu.int/rec/T-REC-H.273-201612-I/en
                    G = Y;
                    B = Cb;
                    R = Cr;
                } else if (state->mode == AVIF_REFORMAT_MODE_YCGCO) {
                    // YCgCo: Formulas 47,48,49,50 from https://www.itu.int/rec/T-REC-H.273-201612-I/en
                    const float t = Y - Cb;
                    G = Y + Cb;
                    B = t - Cr;
                    R = t + Cr;
                } else {
                    // Normal YUV
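                    // Inverse of the RGB -> YUV derivation above: R and B are recovered from the scaled
                    // Cr/Cb differences, and G follows from kr*R + kg*G + kb*B == Y with kr + kg + kb == 1,
                    // which expands to the expression below.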
                    R = Y + (2 * (1 - kr)) * Cr;
                    B = Y + (2 * (1 - kb)) * Cb;
                    G = Y - ((2 * ((kr * (1 - kr) * Cr) + (kb * (1 - kb) * Cb))) / kg);
                }
            } else {
                // Monochrome: just populate all channels with luma (identity mode is irrelevant)
                R = Y;
                G = Y;
                B = Y;
            }

            float Rc = AVIF_CLAMP(R, 0.0f, 1.0f);
            float Gc = AVIF_CLAMP(G, 0.0f, 1.0f);
            float Bc = AVIF_CLAMP(B, 0.0f, 1.0f);

            if (state->toRGBAlphaMode != AVIF_ALPHA_MULTIPLY_MODE_NO_OP) {
                // Calculate A
                uint16_t unormA;
                if (image->depth == 8) {
                    unormA = ptrA8[i];
                } else {
                    unormA = AVIF_MIN(ptrA16[i], yuvMaxChannel);
                }
                const float A = (unormA - state->biasA) / state->rangeA;
                const float Ac = AVIF_CLAMP(A, 0.0f, 1.0f);

                if (state->toRGBAlphaMode == AVIF_ALPHA_MULTIPLY_MODE_MULTIPLY) {
                    if (Ac == 0.0f) {
                        Rc = 0.0f;
                        Gc = 0.0f;
                        Bc = 0.0f;
                    } else if (Ac < 1.0f) {
                        Rc *= Ac;
                        Gc *= Ac;
                        Bc *= Ac;
                    }
                } else {
                    // state->toRGBAlphaMode == AVIF_ALPHA_MULTIPLY_MODE_UNMULTIPLY
                    if (Ac == 0.0f) {
                        Rc = 0.0f;
                        Gc = 0.0f;
                        Bc = 0.0f;
                    } else if (Ac < 1.0f) {
                        Rc /= Ac;
                        Gc /= Ac;
                        Bc /= Ac;
                        Rc = AVIF_MIN(Rc, 1.0f);
                        Gc = AVIF_MIN(Gc, 1.0f);
                        Bc = AVIF_MIN(Bc, 1.0f);
                    }
                }
            }

            if (rgb->depth == 8) {
                *ptrR = (uint8_t)(0.5f + (Rc * rgbMaxChannelF));
                *ptrG = (uint8_t)(0.5f + (Gc * rgbMaxChannelF));
                *ptrB = (uint8_t)(0.5f + (Bc * rgbMaxChannelF));
            } else {
                *((uint16_t *)ptrR) = (uint16_t)(0.5f + (Rc * rgbMaxChannelF));
                *((uint16_t *)ptrG) = (uint16_t)(0.5f + (Gc * rgbMaxChannelF));
                *((uint16_t *)ptrB) = (uint16_t)(0.5f + (Bc * rgbMaxChannelF));
            }
            ptrR += rgbPixelBytes;
            ptrG += rgbPixelBytes;
            ptrB += rgbPixelBytes;
        }
    }
    return AVIF_RESULT_OK;
}

static avifResult avifImageYUV16ToRGB16Color(const avifImage * image, avifRGBImage * rgb, avifReformatState * state)
{
    const float kr = state->kr;
    const float kg = state->kg;
    const float kb = state->kb;
    const uint32_t rgbPixelBytes = state->rgbPixelBytes;
    const float * const unormFloatTableY = state->unormFloatTableY;
    const float * const unormFloatTableUV = state->unormFloatTableUV;

    const uint16_t yuvMaxChannel = (uint16_t)state->yuvMaxChannel;
    const float rgbMaxChannelF = state->rgbMaxChannelF;
    for (uint32_t j = 0; j < image->height; ++j) {
        const uint32_t uvJ = j >> state->formatInfo.chromaShiftY;
        const uint16_t * const ptrY = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_Y][(j * image->yuvRowBytes[AVIF_CHAN_Y])];
        const uint16_t * const ptrU = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_U][(uvJ * image->yuvRowBytes[AVIF_CHAN_U])];
        const uint16_t * const ptrV = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_V][(uvJ * image->yuvRowBytes[AVIF_CHAN_V])];
        uint8_t * ptrR = &rgb->pixels[state->rgbOffsetBytesR + (j * rgb->rowBytes)];
        uint8_t * ptrG = &rgb->pixels[state->rgbOffsetBytesG + (j * rgb->rowBytes)];
        uint8_t * ptrB = &rgb->pixels[state->rgbOffsetBytesB + (j * rgb->rowBytes)];

        for (uint32_t i = 0; i < image->width; ++i) {
            uint32_t uvI = i >> state->formatInfo.chromaShiftX;

            // clamp incoming data to protect against bad LUT lookups
            const uint16_t unormY = AVIF_MIN(ptrY[i], yuvMaxChannel);
            const uint16_t unormU = AVIF_MIN(ptrU[uvI], yuvMaxChannel);
            const uint16_t unormV = AVIF_MIN(ptrV[uvI], yuvMaxChannel);

            // Convert unorm to float
            const float Y = unormFloatTableY[unormY];
            const float Cb = unormFloatTableUV[unormU];
            const float Cr = unormFloatTableUV[unormV];

            const float R = Y + (2 * (1 - kr)) * Cr;
            const float B = Y + (2 * (1 - kb)) * Cb;
            const float G = Y - ((2 * ((kr * (1 - kr) * Cr) + (kb * (1 - kb) * Cb))) / kg);
            const float Rc = AVIF_CLAMP(R, 0.0f, 1.0f);
            const float Gc = AVIF_CLAMP(G, 0.0f, 1.0f);
            const float Bc = AVIF_CLAMP(B, 0.0f, 1.0f);

            *((uint16_t *)ptrR) = (uint16_t)(0.5f + (Rc * rgbMaxChannelF));
            *((uint16_t *)ptrG) = (uint16_t)(0.5f + (Gc * rgbMaxChannelF));
            *((uint16_t *)ptrB) = (uint16_t)(0.5f + (Bc * rgbMaxChannelF));

            ptrR += rgbPixelBytes;
            ptrG += rgbPixelBytes;
            ptrB += rgbPixelBytes;
        }
    }
    return AVIF_RESULT_OK;
}

static avifResult avifImageYUV16ToRGB16Mono(const avifImage * image, avifRGBImage * rgb, avifReformatState * state)
{
    const float kr = state->kr;
    const float kg = state->kg;
    const float kb = state->kb;
    const uint32_t rgbPixelBytes = state->rgbPixelBytes;
    const float * const unormFloatTableY = state->unormFloatTableY;

    const uint16_t yuvMaxChannel = (uint16_t)state->yuvMaxChannel;
    const float rgbMaxChannelF = state->rgbMaxChannelF;
    for (uint32_t j = 0; j < image->height; ++j) {
        const uint16_t * const ptrY = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_Y][(j * image->yuvRowBytes[AVIF_CHAN_Y])];
        uint8_t * ptrR = &rgb->pixels[state->rgbOffsetBytesR + (j * rgb->rowBytes)];
        uint8_t * ptrG = &rgb->pixels[state->rgbOffsetBytesG + (j * rgb->rowBytes)];
        uint8_t * ptrB = &rgb->pixels[state->rgbOffsetBytesB + (j * rgb->rowBytes)];

        for (uint32_t i = 0; i < image->width; ++i) {
            // clamp incoming data to protect against bad LUT lookups
            const uint16_t unormY = AVIF_MIN(ptrY[i], yuvMaxChannel);

            // Convert unorm to float
            const float Y = unormFloatTableY[unormY];
            const float Cb = 0.0f;
            const float Cr = 0.0f;

            const float R = Y + (2 * (1 - kr)) * Cr;
            const float B = Y + (2 * (1 - kb)) * Cb;
            const float G = Y - ((2 * ((kr * (1 - kr) * Cr) + (kb * (1 - kb) * Cb))) / kg);
            const float Rc = AVIF_CLAMP(R, 0.0f, 1.0f);
            const float Gc = AVIF_CLAMP(G, 0.0f, 1.0f);
            const float Bc = AVIF_CLAMP(B, 0.0f, 1.0f);

            *((uint16_t *)ptrR) = (uint16_t)(0.5f + (Rc * rgbMaxChannelF));
            *((uint16_t *)ptrG) = (uint16_t)(0.5f + (Gc * rgbMaxChannelF));
            *((uint16_t *)ptrB) = (uint16_t)(0.5f + (Bc * rgbMaxChannelF));

            ptrR += rgbPixelBytes;
            ptrG += rgbPixelBytes;
            ptrB += rgbPixelBytes;
        }
    }
    return AVIF_RESULT_OK;
}

static avifResult avifImageYUV16ToRGB8Color(const avifImage * image, avifRGBImage * rgb, avifReformatState * state)
{
    const float kr = state->kr;
    const float kg = state->kg;
    const float kb = state->kb;
    const uint32_t rgbPixelBytes = state->rgbPixelBytes;
    const float * const unormFloatTableY = state->unormFloatTableY;
    const float * const unormFloatTableUV = state->unormFloatTableUV;

    const uint16_t yuvMaxChannel = (uint16_t)state->yuvMaxChannel;
    const float rgbMaxChannelF = state->rgbMaxChannelF;
    for (uint32_t j = 0; j < image->height; ++j) {
        const uint32_t uvJ = j >> state->formatInfo.chromaShiftY;
        const uint16_t * const ptrY = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_Y][(j * image->yuvRowBytes[AVIF_CHAN_Y])];
        const uint16_t * const ptrU = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_U][(uvJ * image->yuvRowBytes[AVIF_CHAN_U])];
        const uint16_t * const ptrV = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_V][(uvJ * image->yuvRowBytes[AVIF_CHAN_V])];
        uint8_t * ptrR = &rgb->pixels[state->rgbOffsetBytesR + (j * rgb->rowBytes)];
        uint8_t * ptrG = &rgb->pixels[state->rgbOffsetBytesG + (j * rgb->rowBytes)];
        uint8_t * ptrB = &rgb->pixels[state->rgbOffsetBytesB + (j * rgb->rowBytes)];

        for (uint32_t i = 0; i < image->width; ++i) {
            uint32_t uvI = i >> state->formatInfo.chromaShiftX;

            // clamp incoming data to protect against bad LUT lookups
            const uint16_t unormY = AVIF_MIN(ptrY[i], yuvMaxChannel);
            const uint16_t unormU = AVIF_MIN(ptrU[uvI], yuvMaxChannel);
            const uint16_t unormV = AVIF_MIN(ptrV[uvI], yuvMaxChannel);

            // Convert unorm to float
            const float Y = unormFloatTableY[unormY];
            const float Cb = unormFloatTableUV[unormU];
            const float Cr = unormFloatTableUV[unormV];

            const float R = Y + (2 * (1 - kr)) * Cr;
            const float B = Y + (2 * (1 - kb)) * Cb;
            const float G = Y - ((2 * ((kr * (1 - kr) * Cr) + (kb * (1 - kb) * Cb))) / kg);
            const float Rc = AVIF_CLAMP(R, 0.0f, 1.0f);
            const float Gc = AVIF_CLAMP(G, 0.0f, 1.0f);
            const float Bc = AVIF_CLAMP(B, 0.0f, 1.0f);

            *ptrR = (uint8_t)(0.5f + (Rc * rgbMaxChannelF));
            *ptrG = (uint8_t)(0.5f + (Gc * rgbMaxChannelF));
            *ptrB = (uint8_t)(0.5f + (Bc * rgbMaxChannelF));

            ptrR += rgbPixelBytes;
            ptrG += rgbPixelBytes;
            ptrB += rgbPixelBytes;
        }
    }
    return AVIF_RESULT_OK;
}

static avifResult avifImageYUV16ToRGB8Mono(const avifImage * image, avifRGBImage * rgb, avifReformatState * state)
{
    const float kr = state->kr;
    const float kg = state->kg;
    const float kb = state->kb;
    const uint32_t rgbPixelBytes = state->rgbPixelBytes;
    const float * const unormFloatTableY = state->unormFloatTableY;

    const uint16_t yuvMaxChannel = (uint16_t)state->yuvMaxChannel;
    const float rgbMaxChannelF = state->rgbMaxChannelF;
    for (uint32_t j = 0; j < image->height; ++j) {
        const uint16_t * const ptrY = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_Y][(j * image->yuvRowBytes[AVIF_CHAN_Y])];
        uint8_t * ptrR = &rgb->pixels[state->rgbOffsetBytesR + (j * rgb->rowBytes)];
        uint8_t * ptrG = &rgb->pixels[state->rgbOffsetBytesG + (j * rgb->rowBytes)];
        uint8_t * ptrB = &rgb->pixels[state->rgbOffsetBytesB + (j * rgb->rowBytes)];

        for (uint32_t i = 0; i < image->width; ++i) {
            // clamp incoming data to protect against bad LUT lookups
            const uint16_t unormY = AVIF_MIN(ptrY[i], yuvMaxChannel);

            // Convert unorm to float
            const float Y = unormFloatTableY[unormY];
            const float Cb = 0.0f;
            const float Cr = 0.0f;

            const float R = Y + (2 * (1 - kr)) * Cr;
            const float B = Y + (2 * (1 - kb)) * Cb;
            const float G = Y - ((2 * ((kr * (1 - kr) * Cr) + (kb * (1 - kb) * Cb))) / kg);
            const float Rc = AVIF_CLAMP(R, 0.0f, 1.0f);
            const float Gc = AVIF_CLAMP(G, 0.0f, 1.0f);
            const float Bc = AVIF_CLAMP(B, 0.0f, 1.0f);

            *ptrR = (uint8_t)(0.5f + (Rc * rgbMaxChannelF));
            *ptrG = (uint8_t)(0.5f + (Gc * rgbMaxChannelF));
            *ptrB = (uint8_t)(0.5f + (Bc * rgbMaxChannelF));

            ptrR += rgbPixelBytes;
            ptrG += rgbPixelBytes;
            ptrB += rgbPixelBytes;
        }
    }
    return AVIF_RESULT_OK;
}

static avifResult avifImageYUV8ToRGB16Color(const avifImage * image, avifRGBImage * rgb, avifReformatState * state)
{
    const float kr = state->kr;
    const float kg = state->kg;
    const float kb = state->kb;
    const uint32_t rgbPixelBytes = state->rgbPixelBytes;
    const float * const unormFloatTableY = state->unormFloatTableY;
    const float * const unormFloatTableUV = state->unormFloatTableUV;

    const float rgbMaxChannelF = state->rgbMaxChannelF;
    for (uint32_t j = 0; j < image->height; ++j) {
        const uint32_t uvJ = j >> state->formatInfo.chromaShiftY;
        const uint8_t * const ptrY = &image->yuvPlanes[AVIF_CHAN_Y][(j * image->yuvRowBytes[AVIF_CHAN_Y])];
        const uint8_t * const ptrU = &image->yuvPlanes[AVIF_CHAN_U][(uvJ * image->yuvRowBytes[AVIF_CHAN_U])];
        const uint8_t * const ptrV = &image->yuvPlanes[AVIF_CHAN_V][(uvJ * image->yuvRowBytes[AVIF_CHAN_V])];
        uint8_t * ptrR = &rgb->pixels[state->rgbOffsetBytesR + (j * rgb->rowBytes)];
        uint8_t * ptrG = &rgb->pixels[state->rgbOffsetBytesG + (j * rgb->rowBytes)];
        uint8_t * ptrB = &rgb->pixels[state->rgbOffsetBytesB + (j * rgb->rowBytes)];

        for (uint32_t i = 0; i < image->width; ++i) {
            uint32_t uvI = i >> state->formatInfo.chromaShiftX;

            // Convert unorm to float (no clamp necessary, the full uint8_t range is a legal lookup)
            const float Y = unormFloatTableY[ptrY[i]];
            const float Cb = unormFloatTableUV[ptrU[uvI]];
            const float Cr = unormFloatTableUV[ptrV[uvI]];

            const float R = Y + (2 * (1 - kr)) * Cr;
            const float B = Y + (2 * (1 - kb)) * Cb;
            const float G = Y - ((2 * ((kr * (1 - kr) * Cr) + (kb * (1 - kb) * Cb))) / kg);
            const float Rc = AVIF_CLAMP(R, 0.0f, 1.0f);
            const float Gc = AVIF_CLAMP(G, 0.0f, 1.0f);
            const float Bc = AVIF_CLAMP(B, 0.0f, 1.0f);

            *((uint16_t *)ptrR) = (uint16_t)(0.5f + (Rc * rgbMaxChannelF));
            *((uint16_t *)ptrG) = (uint16_t)(0.5f + (Gc * rgbMaxChannelF));
            *((uint16_t *)ptrB) = (uint16_t)(0.5f + (Bc * rgbMaxChannelF));

            ptrR += rgbPixelBytes;
            ptrG += rgbPixelBytes;
            ptrB += rgbPixelBytes;
        }
    }
    return AVIF_RESULT_OK;
}

static avifResult avifImageYUV8ToRGB16Mono(const avifImage * image, avifRGBImage * rgb, avifReformatState * state)
{
    const float kr = state->kr;
    const float kg = state->kg;
    const float kb = state->kb;
    const uint32_t rgbPixelBytes = state->rgbPixelBytes;
    const float * const unormFloatTableY = state->unormFloatTableY;

    const float rgbMaxChannelF = state->rgbMaxChannelF;
    for (uint32_t j = 0; j < image->height; ++j) {
        const uint8_t * const ptrY = &image->yuvPlanes[AVIF_CHAN_Y][(j * image->yuvRowBytes[AVIF_CHAN_Y])];
        uint8_t * ptrR = &rgb->pixels[state->rgbOffsetBytesR + (j * rgb->rowBytes)];
        uint8_t * ptrG = &rgb->pixels[state->rgbOffsetBytesG + (j * rgb->rowBytes)];
        uint8_t * ptrB = &rgb->pixels[state->rgbOffsetBytesB + (j * rgb->rowBytes)];

        for (uint32_t i = 0; i < image->width; ++i) {
            // Convert unorm to float (no clamp necessary, the full uint8_t range is a legal lookup)
            const float Y = unormFloatTableY[ptrY[i]];
            const float Cb = 0.0f;
            const float Cr = 0.0f;

            const float R = Y + (2 * (1 - kr)) * Cr;
            const float B = Y + (2 * (1 - kb)) * Cb;
            const float G = Y - ((2 * ((kr * (1 - kr) * Cr) + (kb * (1 - kb) * Cb))) / kg);
            const float Rc = AVIF_CLAMP(R, 0.0f, 1.0f);
            const float Gc = AVIF_CLAMP(G, 0.0f, 1.0f);
            const float Bc = AVIF_CLAMP(B, 0.0f, 1.0f);

            *((uint16_t *)ptrR) = (uint16_t)(0.5f + (Rc * rgbMaxChannelF));
            *((uint16_t *)ptrG) = (uint16_t)(0.5f + (Gc * rgbMaxChannelF));
            *((uint16_t *)ptrB) = (uint16_t)(0.5f + (Bc * rgbMaxChannelF));

            ptrR += rgbPixelBytes;
            ptrG += rgbPixelBytes;
            ptrB += rgbPixelBytes;
        }
    }
    return AVIF_RESULT_OK;
}

static avifResult avifImageIdentity8ToRGB8ColorFullRange(const avifImage * image, avifRGBImage * rgb, avifReformatState * state)
{
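    // Identity (GBR): the YUV planes directly carry G (in Y), B (in U), and R (in V), per
    // H.273 formulas 41,42,43, so this full-range 8-bit path is a simple reordering copy.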
    const uint32_t rgbPixelBytes = state->rgbPixelBytes;
    for (uint32_t j = 0; j < image->height; ++j) {
        const uint8_t * const ptrY = &image->yuvPlanes[AVIF_CHAN_Y][(j * image->yuvRowBytes[AVIF_CHAN_Y])];
        const uint8_t * const ptrU = &image->yuvPlanes[AVIF_CHAN_U][(j * image->yuvRowBytes[AVIF_CHAN_U])];
        const uint8_t * const ptrV = &image->yuvPlanes[AVIF_CHAN_V][(j * image->yuvRowBytes[AVIF_CHAN_V])];
        uint8_t * ptrR = &rgb->pixels[state->rgbOffsetBytesR + (j * rgb->rowBytes)];
        uint8_t * ptrG = &rgb->pixels[state->rgbOffsetBytesG + (j * rgb->rowBytes)];
        uint8_t * ptrB = &rgb->pixels[state->rgbOffsetBytesB + (j * rgb->rowBytes)];

        for (uint32_t i = 0; i < image->width; ++i) {
            *ptrR = ptrV[i];
            *ptrG = ptrY[i];
            *ptrB = ptrU[i];

            ptrR += rgbPixelBytes;
            ptrG += rgbPixelBytes;
            ptrB += rgbPixelBytes;
        }
    }
    return AVIF_RESULT_OK;
}

avifImageYUV8ToRGB8Color(const avifImage * image,avifRGBImage * rgb,avifReformatState * state)990 static avifResult avifImageYUV8ToRGB8Color(const avifImage * image, avifRGBImage * rgb, avifReformatState * state)
991 {
992     const float kr = state->kr;
993     const float kg = state->kg;
994     const float kb = state->kb;
995     const uint32_t rgbPixelBytes = state->rgbPixelBytes;
996     const float * const unormFloatTableY = state->unormFloatTableY;
997     const float * const unormFloatTableUV = state->unormFloatTableUV;
998 
999     const float rgbMaxChannelF = state->rgbMaxChannelF;
1000     for (uint32_t j = 0; j < image->height; ++j) {
1001         const uint32_t uvJ = j >> state->formatInfo.chromaShiftY;
1002         const uint8_t * const ptrY = &image->yuvPlanes[AVIF_CHAN_Y][(j * image->yuvRowBytes[AVIF_CHAN_Y])];
1003         const uint8_t * const ptrU = &image->yuvPlanes[AVIF_CHAN_U][(uvJ * image->yuvRowBytes[AVIF_CHAN_U])];
1004         const uint8_t * const ptrV = &image->yuvPlanes[AVIF_CHAN_V][(uvJ * image->yuvRowBytes[AVIF_CHAN_V])];
1005         uint8_t * ptrR = &rgb->pixels[state->rgbOffsetBytesR + (j * rgb->rowBytes)];
1006         uint8_t * ptrG = &rgb->pixels[state->rgbOffsetBytesG + (j * rgb->rowBytes)];
1007         uint8_t * ptrB = &rgb->pixels[state->rgbOffsetBytesB + (j * rgb->rowBytes)];
1008 
1009         for (uint32_t i = 0; i < image->width; ++i) {
1010             uint32_t uvI = i >> state->formatInfo.chromaShiftX;
1011 
1012             // Convert unorm to float (no clamp necessary, the full uint8_t range is a legal lookup)
1013             const float Y = unormFloatTableY[ptrY[i]];
1014             const float Cb = unormFloatTableUV[ptrU[uvI]];
1015             const float Cr = unormFloatTableUV[ptrV[uvI]];
1016 
1017             const float R = Y + (2 * (1 - kr)) * Cr;
1018             const float B = Y + (2 * (1 - kb)) * Cb;
1019             const float G = Y - ((2 * ((kr * (1 - kr) * Cr) + (kb * (1 - kb) * Cb))) / kg);
1020             const float Rc = AVIF_CLAMP(R, 0.0f, 1.0f);
1021             const float Gc = AVIF_CLAMP(G, 0.0f, 1.0f);
1022             const float Bc = AVIF_CLAMP(B, 0.0f, 1.0f);
1023 
1024             *ptrR = (uint8_t)(0.5f + (Rc * rgbMaxChannelF));
1025             *ptrG = (uint8_t)(0.5f + (Gc * rgbMaxChannelF));
1026             *ptrB = (uint8_t)(0.5f + (Bc * rgbMaxChannelF));
1027 
1028             ptrR += rgbPixelBytes;
1029             ptrG += rgbPixelBytes;
1030             ptrB += rgbPixelBytes;
1031         }
1032     }
1033     return AVIF_RESULT_OK;
1034 }
1035 
avifImageYUV8ToRGB8Mono(const avifImage * image,avifRGBImage * rgb,avifReformatState * state)1036 static avifResult avifImageYUV8ToRGB8Mono(const avifImage * image, avifRGBImage * rgb, avifReformatState * state)
{
    const float kr = state->kr;
    const float kg = state->kg;
    const float kb = state->kb;
    const uint32_t rgbPixelBytes = state->rgbPixelBytes;
    const float * const unormFloatTableY = state->unormFloatTableY;

    const float rgbMaxChannelF = state->rgbMaxChannelF;
    for (uint32_t j = 0; j < image->height; ++j) {
        const uint8_t * const ptrY = &image->yuvPlanes[AVIF_CHAN_Y][(j * image->yuvRowBytes[AVIF_CHAN_Y])];
        uint8_t * ptrR = &rgb->pixels[state->rgbOffsetBytesR + (j * rgb->rowBytes)];
        uint8_t * ptrG = &rgb->pixels[state->rgbOffsetBytesG + (j * rgb->rowBytes)];
        uint8_t * ptrB = &rgb->pixels[state->rgbOffsetBytesB + (j * rgb->rowBytes)];

        for (uint32_t i = 0; i < image->width; ++i) {
            // Convert unorm to float (no clamp necessary, the full uint8_t range is a legal lookup)
            const float Y = unormFloatTableY[ptrY[i]];
            const float Cb = 0.0f;
            const float Cr = 0.0f;

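            // With Cb and Cr fixed at 0, the expressions below reduce to R = G = B = Y (grayscale)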
            const float R = Y + (2 * (1 - kr)) * Cr;
            const float B = Y + (2 * (1 - kb)) * Cb;
            const float G = Y - ((2 * ((kr * (1 - kr) * Cr) + (kb * (1 - kb) * Cb))) / kg);
            const float Rc = AVIF_CLAMP(R, 0.0f, 1.0f);
            const float Gc = AVIF_CLAMP(G, 0.0f, 1.0f);
            const float Bc = AVIF_CLAMP(B, 0.0f, 1.0f);

            *ptrR = (uint8_t)(0.5f + (Rc * rgbMaxChannelF));
            *ptrG = (uint8_t)(0.5f + (Gc * rgbMaxChannelF));
            *ptrB = (uint8_t)(0.5f + (Bc * rgbMaxChannelF));

            ptrR += rgbPixelBytes;
            ptrG += rgbPixelBytes;
            ptrB += rgbPixelBytes;
        }
    }
    return AVIF_RESULT_OK;
}

avifResult avifImageYUVToRGB(const avifImage * image, avifRGBImage * rgb)
{
    if (!image->yuvPlanes[AVIF_CHAN_Y]) {
        return AVIF_RESULT_REFORMAT_FAILED;
    }

    avifReformatState state;
    if (!avifPrepareReformatState(image, rgb, &state)) {
        return AVIF_RESULT_REFORMAT_FAILED;
    }

    avifAlphaMultiplyMode alphaMultiplyMode = state.toRGBAlphaMode;
    avifBool convertedWithLibYUV = AVIF_FALSE;
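    // Try libyuv only when no alpha (un)multiply is required, or when the destination RGB format
    // has an alpha channel so that the (un)multiply can be applied afterward as a separate pass.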
    if (alphaMultiplyMode == AVIF_ALPHA_MULTIPLY_MODE_NO_OP || avifRGBFormatHasAlpha(rgb->format)) {
        avifResult libyuvResult = avifImageYUVToRGBLibYUV(image, rgb);
        if (libyuvResult == AVIF_RESULT_OK) {
            convertedWithLibYUV = AVIF_TRUE;
        } else {
            if (libyuvResult != AVIF_RESULT_NOT_IMPLEMENTED) {
                return libyuvResult;
            }
        }
    }

    // Reformat alpha if the user asks for it, or if (un)multiply processing needs it.
    if (avifRGBFormatHasAlpha(rgb->format) && (!rgb->ignoreAlpha || (alphaMultiplyMode != AVIF_ALPHA_MULTIPLY_MODE_NO_OP))) {
        avifAlphaParams params;

        params.width = rgb->width;
        params.height = rgb->height;
        params.dstDepth = rgb->depth;
        params.dstRange = AVIF_RANGE_FULL;
        params.dstPlane = rgb->pixels;
        params.dstRowBytes = rgb->rowBytes;
        params.dstOffsetBytes = state.rgbOffsetBytesA;
        params.dstPixelBytes = state.rgbPixelBytes;

        if (image->alphaPlane && image->alphaRowBytes) {
            params.srcDepth = image->depth;
            params.srcRange = image->alphaRange;
            params.srcPlane = image->alphaPlane;
            params.srcRowBytes = image->alphaRowBytes;
            params.srcOffsetBytes = 0;
            params.srcPixelBytes = state.yuvChannelBytes;

            avifReformatAlpha(&params);
        } else {
            if (!convertedWithLibYUV) { // libyuv fills alpha for us
                avifFillAlpha(&params);
            }
        }
    }

    if (!convertedWithLibYUV) {
        // libyuv is either unavailable or unable to perform the specific conversion required here.
        // Look over the available built-in "fast" routines for YUV->RGB conversion and see if one
        // fits the current combination, or as a last resort, call avifImageYUVAnyToRGBAnySlow(),
        // which handles every possible YUV->RGB combination, but very slowly (in comparison).

        avifResult convertResult = AVIF_RESULT_NOT_IMPLEMENTED;

        avifChromaUpsampling chromaUpsampling;
        switch (rgb->chromaUpsampling) {
            case AVIF_CHROMA_UPSAMPLING_AUTOMATIC:
            case AVIF_CHROMA_UPSAMPLING_BEST_QUALITY:
            case AVIF_CHROMA_UPSAMPLING_BILINEAR:
            default:
                chromaUpsampling = AVIF_CHROMA_UPSAMPLING_BILINEAR;
                break;

            case AVIF_CHROMA_UPSAMPLING_FASTEST:
            case AVIF_CHROMA_UPSAMPLING_NEAREST:
                chromaUpsampling = AVIF_CHROMA_UPSAMPLING_NEAREST;
                break;
        }

        const avifBool hasColor =
            (image->yuvRowBytes[AVIF_CHAN_U] && image->yuvRowBytes[AVIF_CHAN_V] && (image->yuvFormat != AVIF_PIXEL_FORMAT_YUV400));

        if ((!hasColor || (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV444) || (chromaUpsampling == AVIF_CHROMA_UPSAMPLING_NEAREST)) &&
            (alphaMultiplyMode == AVIF_ALPHA_MULTIPLY_MODE_NO_OP || avifRGBFormatHasAlpha(rgb->format))) {
            // Explanations on the above conditional:
            // * None of these fast paths currently support bilinear upsampling, so avoid all of them
            //   unless the YUV data isn't subsampled or the caller explicitly requested AVIF_CHROMA_UPSAMPLING_NEAREST.
            // * None of these fast paths currently handle alpha (un)multiply, so avoid all of them
            //   if we can't do alpha (un)multiply as a separate post step (destination format doesn't have alpha).

            if (state.mode == AVIF_REFORMAT_MODE_IDENTITY) {
                if ((image->depth == 8) && (rgb->depth == 8) && (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV444) &&
                    (image->yuvRange == AVIF_RANGE_FULL)) {
                    convertResult = avifImageIdentity8ToRGB8ColorFullRange(image, rgb, &state);
                }

                // TODO: Add more fast paths for identity
            } else if (state.mode == AVIF_REFORMAT_MODE_YUV_COEFFICIENTS) {
                if (image->depth > 8) {
                    // yuv:u16

                    if (rgb->depth > 8) {
                        // yuv:u16, rgb:u16

                        if (hasColor) {
                            convertResult = avifImageYUV16ToRGB16Color(image, rgb, &state);
                        } else {
                            convertResult = avifImageYUV16ToRGB16Mono(image, rgb, &state);
                        }
                    } else {
                        // yuv:u16, rgb:u8

                        if (hasColor) {
                            convertResult = avifImageYUV16ToRGB8Color(image, rgb, &state);
                        } else {
                            convertResult = avifImageYUV16ToRGB8Mono(image, rgb, &state);
                        }
                    }
                } else {
                    // yuv:u8

                    if (rgb->depth > 8) {
                        // yuv:u8, rgb:u16

                        if (hasColor) {
                            convertResult = avifImageYUV8ToRGB16Color(image, rgb, &state);
                        } else {
                            convertResult = avifImageYUV8ToRGB16Mono(image, rgb, &state);
                        }
                    } else {
                        // yuv:u8, rgb:u8

                        if (hasColor) {
                            convertResult = avifImageYUV8ToRGB8Color(image, rgb, &state);
                        } else {
                            convertResult = avifImageYUV8ToRGB8Mono(image, rgb, &state);
                        }
                    }
                }
            }
        }

        if (convertResult == AVIF_RESULT_NOT_IMPLEMENTED) {
            // If we get here, there is no fast path for this combination. Time to be slow!
            convertResult = avifImageYUVAnyToRGBAnySlow(image, rgb, &state, chromaUpsampling);

            // The slow path also handles alpha (un)multiply, so forget the operation here.
            alphaMultiplyMode = AVIF_ALPHA_MULTIPLY_MODE_NO_OP;
        }

        if (convertResult != AVIF_RESULT_OK) {
            return convertResult;
        }
    }

    // Process alpha premultiplication, if necessary
    if (alphaMultiplyMode == AVIF_ALPHA_MULTIPLY_MODE_MULTIPLY) {
        return avifRGBImagePremultiplyAlpha(rgb);
    } else if (alphaMultiplyMode == AVIF_ALPHA_MULTIPLY_MODE_UNMULTIPLY) {
        return avifRGBImageUnpremultiplyAlpha(rgb);
    }

    return AVIF_RESULT_OK;
}

// Limited -> Full
// Plan: subtract limited offset, then multiply by ratio of FULLSIZE/LIMITEDSIZE (rounding), then clamp.
// RATIO = (FULLY - 0) / (MAXLIMITEDY - MINLIMITEDY)
// -----------------------------------------
// ( ( (v - MINLIMITEDY)                    | subtract limited offset
//     * FULLY                              | multiply by numerator of ratio
//   ) + ((MAXLIMITEDY - MINLIMITEDY) / 2)  | add half of the denominator to round the division
// ) / (MAXLIMITEDY - MINLIMITEDY)          | divide by denominator of ratio
// AVIF_CLAMP(v, 0, FULLY)                  | clamp to full range
// -----------------------------------------
#define LIMITED_TO_FULL(MINLIMITEDY, MAXLIMITEDY, FULLY)                                                 \
    v = (((v - MINLIMITEDY) * FULLY) + ((MAXLIMITEDY - MINLIMITEDY) / 2)) / (MAXLIMITEDY - MINLIMITEDY); \
    v = AVIF_CLAMP(v, 0, FULLY)
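
// Example with the 8-bit Y range (16..235 -> 0..255):
//   v = 235 -> ((219 * 255) + 109) / 219 = 255
//   v = 128 -> ((112 * 255) + 109) / 219 = 130
//   v = 16  -> 0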

// Full -> Limited
// Plan: multiply by ratio of LIMITEDSIZE/FULLSIZE (rounding), then add limited offset, then clamp.
// RATIO = (MAXLIMITEDY - MINLIMITEDY) / (FULLY - 0)
// -----------------------------------------
// ( ( (v * (MAXLIMITEDY - MINLIMITEDY))    | multiply by numerator of ratio
//     + (FULLY / 2)                        | add half of the denominator to round the division
//   ) / FULLY                              | divide by denominator of ratio
// ) + MINLIMITEDY                          | add limited offset
// AVIF_CLAMP(v, MINLIMITEDY, MAXLIMITEDY)  | clamp to limited range
// -----------------------------------------
#define FULL_TO_LIMITED(MINLIMITEDY, MAXLIMITEDY, FULLY)                           \
    v = (((v * (MAXLIMITEDY - MINLIMITEDY)) + (FULLY / 2)) / FULLY) + MINLIMITEDY; \
    v = AVIF_CLAMP(v, MINLIMITEDY, MAXLIMITEDY)
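
// Example with the 8-bit Y range (0..255 -> 16..235):
//   v = 255 -> (((255 * 219) + 127) / 255) + 16 = 235
//   v = 128 -> (((128 * 219) + 127) / 255) + 16 = 126
//   v = 0   -> 16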

int avifLimitedToFullY(int depth, int v)
{
    switch (depth) {
        case 8:
            LIMITED_TO_FULL(16, 235, 255);
            break;
        case 10:
            LIMITED_TO_FULL(64, 940, 1023);
            break;
        case 12:
            LIMITED_TO_FULL(256, 3760, 4095);
            break;
    }
    return v;
}

int avifLimitedToFullUV(int depth, int v)
{
    switch (depth) {
        case 8:
            LIMITED_TO_FULL(16, 240, 255);
            break;
        case 10:
            LIMITED_TO_FULL(64, 960, 1023);
            break;
        case 12:
            LIMITED_TO_FULL(256, 3840, 4095);
            break;
    }
    return v;
}

int avifFullToLimitedY(int depth, int v)
{
    switch (depth) {
        case 8:
            FULL_TO_LIMITED(16, 235, 255);
            break;
        case 10:
            FULL_TO_LIMITED(64, 940, 1023);
            break;
        case 12:
            FULL_TO_LIMITED(256, 3760, 4095);
            break;
    }
    return v;
}

int avifFullToLimitedUV(int depth, int v)
{
    switch (depth) {
        case 8:
            FULL_TO_LIMITED(16, 240, 255);
            break;
        case 10:
            FULL_TO_LIMITED(64, 960, 1023);
            break;
        case 12:
            FULL_TO_LIMITED(256, 3840, 4095);
            break;
    }
    return v;
}