/*
 * jccolext-neon.c - colorspace conversion (Arm NEON)
 *
 * Copyright 2020 The Chromium Authors. All Rights Reserved.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty.  In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

/* This file is included by jccolor-neon.c */

/*
 * RGB -> YCbCr conversion is defined by the following equations:
 *    Y  =  0.29900 * R + 0.58700 * G + 0.11400 * B
 *    Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B  + 128
 *    Cr =  0.50000 * R - 0.41869 * G - 0.08131 * B  + 128
 *
 * Avoid floating point arithmetic by using shifted integer constants:
 *    0.29899597 = 19595 * 2^-16
 *    0.58700561 = 38470 * 2^-16
 *    0.11399841 =  7471 * 2^-16
 *    0.16874695 = 11059 * 2^-16
 *    0.33125305 = 21709 * 2^-16
 *    0.50000000 = 32768 * 2^-16
 *    0.41868592 = 27439 * 2^-16
 *    0.08131409 =  5329 * 2^-16
 * These constants are defined in jccolor-neon.c.
 *
 * To ensure correct results, the fixed-point equivalent of 0.5 is added to
 * Cb and Cr up front, so that the truncating right shift used to descale
 * them rounds to the nearest integer.
 */

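/*
 * To illustrate the fixed-point scheme (a worked example only; this comment
 * is not part of the conversion code), a pure-red pixel (R = 255, G = 0,
 * B = 0) converts as:
 *    Y  = (19595 * 255 + 32768) >> 16                =  76
 *    Cb = ((128 << 16) + 32767 - 11059 * 255) >> 16  =  85
 *    Cr = ((128 << 16) + 32767 + 32768 * 255) >> 16  = 255
 * which agree with the floating-point values 76.245, 84.97 and 255.5.  The
 * Cr case shows why the rounding term is 32767 rather than 32768: adding a
 * full 0.5 would turn 255.5 into 256, which wraps to 0 when the result is
 * narrowed to 8 bits.
 */
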
void jsimd_rgb_ycc_convert_neon(JDIMENSION image_width,
                                JSAMPARRAY input_buf,
                                JSAMPIMAGE output_buf,
                                JDIMENSION output_row,
                                int num_rows)
{
  /* Pointer to RGB(X/A) input data. */
  JSAMPROW inptr;
  /* Pointers to Y, Cb and Cr output data. */
  JSAMPROW outptr0, outptr1, outptr2;

  /* Set up the conversion constants. */
  const uint16x8_t consts = vld1q_u16(jsimd_rgb_ycc_neon_consts);
  const uint32x4_t scaled_128_5 = vdupq_n_u32((128 << 16) + 32767);
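  /* scaled_128_5 is 128.5 - 2^-16 in unsigned Q16.16 fixed point: the +128
   * chroma offset plus (almost) the 0.5 rounding term, pre-loaded into every
   * Cb and Cr accumulator so that a plain truncating shift rounds correctly.
   */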

  while (--num_rows >= 0) {
    inptr = *input_buf++;
    outptr0 = output_buf[0][output_row];
    outptr1 = output_buf[1][output_row];
    outptr2 = output_buf[2][output_row];
    output_row++;

    int cols_remaining = image_width;
    for (; cols_remaining >= 16; cols_remaining -= 16) {

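      /* Load 16 pixels and de-interleave the color channels: vld3q_u8 (or
       * vld4q_u8 for four-byte pixels) splits the interleaved byte stream
       * into one vector register per channel.
       */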
#if RGB_PIXELSIZE == 4
      uint8x16x4_t input_pixels = vld4q_u8(inptr);
#else
      uint8x16x3_t input_pixels = vld3q_u8(inptr);
#endif
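      /* Widen each channel from 8-bit to 16-bit; vmovl_u8 zero-extends, and
       * the _l/_h suffixes denote the low and high 8 pixels of the 16.
       */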
      uint16x8_t r_l = vmovl_u8(vget_low_u8(input_pixels.val[RGB_RED]));
      uint16x8_t g_l = vmovl_u8(vget_low_u8(input_pixels.val[RGB_GREEN]));
      uint16x8_t b_l = vmovl_u8(vget_low_u8(input_pixels.val[RGB_BLUE]));
      uint16x8_t r_h = vmovl_u8(vget_high_u8(input_pixels.val[RGB_RED]));
      uint16x8_t g_h = vmovl_u8(vget_high_u8(input_pixels.val[RGB_GREEN]));
      uint16x8_t b_h = vmovl_u8(vget_high_u8(input_pixels.val[RGB_BLUE]));

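      /* Each component uses four 32-bit accumulators covering pixels 0-3
       * (ll), 4-7 (lh), 8-11 (hl) and 12-15 (hh); the laneq forms of
       * vmull/vmlal/vmlsl multiply a channel by the coefficient held in the
       * named lane of consts.
       */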
      /* Compute Y = 0.29900 * R + 0.58700 * G + 0.11400 * B */
      uint32x4_t y_ll = vmull_laneq_u16(vget_low_u16(r_l), consts, 0);
      y_ll = vmlal_laneq_u16(y_ll, vget_low_u16(g_l), consts, 1);
      y_ll = vmlal_laneq_u16(y_ll, vget_low_u16(b_l), consts, 2);
      uint32x4_t y_lh = vmull_high_laneq_u16(r_l, consts, 0);
      y_lh = vmlal_high_laneq_u16(y_lh, g_l, consts, 1);
      y_lh = vmlal_high_laneq_u16(y_lh, b_l, consts, 2);
      uint32x4_t y_hl = vmull_laneq_u16(vget_low_u16(r_h), consts, 0);
      y_hl = vmlal_laneq_u16(y_hl, vget_low_u16(g_h), consts, 1);
      y_hl = vmlal_laneq_u16(y_hl, vget_low_u16(b_h), consts, 2);
      uint32x4_t y_hh = vmull_high_laneq_u16(r_h, consts, 0);
      y_hh = vmlal_high_laneq_u16(y_hh, g_h, consts, 1);
      y_hh = vmlal_high_laneq_u16(y_hh, b_h, consts, 2);

      /* Compute Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B  + 128 */
      uint32x4_t cb_ll = scaled_128_5;
      cb_ll = vmlsl_laneq_u16(cb_ll, vget_low_u16(r_l), consts, 3);
      cb_ll = vmlsl_laneq_u16(cb_ll, vget_low_u16(g_l), consts, 4);
      cb_ll = vmlal_laneq_u16(cb_ll, vget_low_u16(b_l), consts, 5);
      uint32x4_t cb_lh = scaled_128_5;
      cb_lh = vmlsl_high_laneq_u16(cb_lh, r_l, consts, 3);
      cb_lh = vmlsl_high_laneq_u16(cb_lh, g_l, consts, 4);
      cb_lh = vmlal_high_laneq_u16(cb_lh, b_l, consts, 5);
      uint32x4_t cb_hl = scaled_128_5;
      cb_hl = vmlsl_laneq_u16(cb_hl, vget_low_u16(r_h), consts, 3);
      cb_hl = vmlsl_laneq_u16(cb_hl, vget_low_u16(g_h), consts, 4);
      cb_hl = vmlal_laneq_u16(cb_hl, vget_low_u16(b_h), consts, 5);
      uint32x4_t cb_hh = scaled_128_5;
      cb_hh = vmlsl_high_laneq_u16(cb_hh, r_h, consts, 3);
      cb_hh = vmlsl_high_laneq_u16(cb_hh, g_h, consts, 4);
      cb_hh = vmlal_high_laneq_u16(cb_hh, b_h, consts, 5);

      /* Compute Cr = 0.50000 * R - 0.41869 * G - 0.08131 * B  + 128 */
      uint32x4_t cr_ll = scaled_128_5;
      cr_ll = vmlal_laneq_u16(cr_ll, vget_low_u16(r_l), consts, 5);
      cr_ll = vmlsl_laneq_u16(cr_ll, vget_low_u16(g_l), consts, 6);
      cr_ll = vmlsl_laneq_u16(cr_ll, vget_low_u16(b_l), consts, 7);
      uint32x4_t cr_lh = scaled_128_5;
      cr_lh = vmlal_high_laneq_u16(cr_lh, r_l, consts, 5);
      cr_lh = vmlsl_high_laneq_u16(cr_lh, g_l, consts, 6);
      cr_lh = vmlsl_high_laneq_u16(cr_lh, b_l, consts, 7);
      uint32x4_t cr_hl = scaled_128_5;
      cr_hl = vmlal_laneq_u16(cr_hl, vget_low_u16(r_h), consts, 5);
      cr_hl = vmlsl_laneq_u16(cr_hl, vget_low_u16(g_h), consts, 6);
      cr_hl = vmlsl_laneq_u16(cr_hl, vget_low_u16(b_h), consts, 7);
      uint32x4_t cr_hh = scaled_128_5;
      cr_hh = vmlal_high_laneq_u16(cr_hh, r_h, consts, 5);
      cr_hh = vmlsl_high_laneq_u16(cr_hh, g_h, consts, 6);
      cr_hh = vmlsl_high_laneq_u16(cr_hh, b_h, consts, 7);

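      /* Y descales with a rounding shift (vrshrn); Cb and Cr descale with a
       * truncating shift (vshrn) because their 0.5 rounding term was already
       * folded into scaled_128_5.
       */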
      /* Descale Y values (rounding right shift) and narrow to 16-bit. */
      uint16x8_t y_l = vcombine_u16(vrshrn_n_u32(y_ll, 16),
                                    vrshrn_n_u32(y_lh, 16));
      uint16x8_t y_h = vcombine_u16(vrshrn_n_u32(y_hl, 16),
                                    vrshrn_n_u32(y_hh, 16));
      /* Descale Cb values (right shift) and narrow to 16-bit. */
      uint16x8_t cb_l = vcombine_u16(vshrn_n_u32(cb_ll, 16),
                                     vshrn_n_u32(cb_lh, 16));
      uint16x8_t cb_h = vcombine_u16(vshrn_n_u32(cb_hl, 16),
                                     vshrn_n_u32(cb_hh, 16));
      /* Descale Cr values (right shift) and narrow to 16-bit. */
      uint16x8_t cr_l = vcombine_u16(vshrn_n_u32(cr_ll, 16),
                                     vshrn_n_u32(cr_lh, 16));
      uint16x8_t cr_h = vcombine_u16(vshrn_n_u32(cr_hl, 16),
                                     vshrn_n_u32(cr_hh, 16));
      /* Narrow Y, Cb and Cr values to 8-bit and store to memory.  Buffer
       * overwrite is permitted up to the next multiple of ALIGN_SIZE bytes.
       */
      vst1q_u8(outptr0, vcombine_u8(vmovn_u16(y_l), vmovn_u16(y_h)));
      vst1q_u8(outptr1, vcombine_u8(vmovn_u16(cb_l), vmovn_u16(cb_h)));
      vst1q_u8(outptr2, vcombine_u8(vmovn_u16(cr_l), vmovn_u16(cr_h)));

      /* Increment pointers. */
      inptr += (16 * RGB_PIXELSIZE);
      outptr0 += 16;
      outptr1 += 16;
      outptr2 += 16;
    }

    if (cols_remaining > 8) {
      /* To prevent buffer overread by the vector load instructions, the
       * last (image_width % 16) columns of data are first memcopied to a
       * temporary buffer large enough to accommodate the vector load.
       */
      ALIGN(16) uint8_t tmp_buf[16 * RGB_PIXELSIZE];
      memcpy(tmp_buf, inptr, cols_remaining * RGB_PIXELSIZE);
      inptr = tmp_buf;
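      /* The loads below still read a full 16 pixels from tmp_buf; bytes past
       * cols_remaining * RGB_PIXELSIZE are uninitialized, and the values
       * computed from them land only in the overwritable bytes past the
       * valid end of each output row.
       */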

#if RGB_PIXELSIZE == 4
      uint8x16x4_t input_pixels = vld4q_u8(inptr);
#else
      uint8x16x3_t input_pixels = vld3q_u8(inptr);
#endif
      uint16x8_t r_l = vmovl_u8(vget_low_u8(input_pixels.val[RGB_RED]));
      uint16x8_t g_l = vmovl_u8(vget_low_u8(input_pixels.val[RGB_GREEN]));
      uint16x8_t b_l = vmovl_u8(vget_low_u8(input_pixels.val[RGB_BLUE]));
      uint16x8_t r_h = vmovl_u8(vget_high_u8(input_pixels.val[RGB_RED]));
      uint16x8_t g_h = vmovl_u8(vget_high_u8(input_pixels.val[RGB_GREEN]));
      uint16x8_t b_h = vmovl_u8(vget_high_u8(input_pixels.val[RGB_BLUE]));

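      /* From here the arithmetic is identical to the main loop; only the
       * source of the 16 pixels (tmp_buf) differs.
       */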
      /* Compute Y = 0.29900 * R + 0.58700 * G + 0.11400 * B */
      uint32x4_t y_ll = vmull_laneq_u16(vget_low_u16(r_l), consts, 0);
      y_ll = vmlal_laneq_u16(y_ll, vget_low_u16(g_l), consts, 1);
      y_ll = vmlal_laneq_u16(y_ll, vget_low_u16(b_l), consts, 2);
      uint32x4_t y_lh = vmull_high_laneq_u16(r_l, consts, 0);
      y_lh = vmlal_high_laneq_u16(y_lh, g_l, consts, 1);
      y_lh = vmlal_high_laneq_u16(y_lh, b_l, consts, 2);
      uint32x4_t y_hl = vmull_laneq_u16(vget_low_u16(r_h), consts, 0);
      y_hl = vmlal_laneq_u16(y_hl, vget_low_u16(g_h), consts, 1);
      y_hl = vmlal_laneq_u16(y_hl, vget_low_u16(b_h), consts, 2);
      uint32x4_t y_hh = vmull_high_laneq_u16(r_h, consts, 0);
      y_hh = vmlal_high_laneq_u16(y_hh, g_h, consts, 1);
      y_hh = vmlal_high_laneq_u16(y_hh, b_h, consts, 2);

      /* Compute Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B  + 128 */
      uint32x4_t cb_ll = scaled_128_5;
      cb_ll = vmlsl_laneq_u16(cb_ll, vget_low_u16(r_l), consts, 3);
      cb_ll = vmlsl_laneq_u16(cb_ll, vget_low_u16(g_l), consts, 4);
      cb_ll = vmlal_laneq_u16(cb_ll, vget_low_u16(b_l), consts, 5);
      uint32x4_t cb_lh = scaled_128_5;
      cb_lh = vmlsl_high_laneq_u16(cb_lh, r_l, consts, 3);
      cb_lh = vmlsl_high_laneq_u16(cb_lh, g_l, consts, 4);
      cb_lh = vmlal_high_laneq_u16(cb_lh, b_l, consts, 5);
      uint32x4_t cb_hl = scaled_128_5;
      cb_hl = vmlsl_laneq_u16(cb_hl, vget_low_u16(r_h), consts, 3);
      cb_hl = vmlsl_laneq_u16(cb_hl, vget_low_u16(g_h), consts, 4);
      cb_hl = vmlal_laneq_u16(cb_hl, vget_low_u16(b_h), consts, 5);
      uint32x4_t cb_hh = scaled_128_5;
      cb_hh = vmlsl_high_laneq_u16(cb_hh, r_h, consts, 3);
      cb_hh = vmlsl_high_laneq_u16(cb_hh, g_h, consts, 4);
      cb_hh = vmlal_high_laneq_u16(cb_hh, b_h, consts, 5);

      /* Compute Cr = 0.50000 * R - 0.41869 * G - 0.08131 * B  + 128 */
      uint32x4_t cr_ll = scaled_128_5;
      cr_ll = vmlal_laneq_u16(cr_ll, vget_low_u16(r_l), consts, 5);
      cr_ll = vmlsl_laneq_u16(cr_ll, vget_low_u16(g_l), consts, 6);
      cr_ll = vmlsl_laneq_u16(cr_ll, vget_low_u16(b_l), consts, 7);
      uint32x4_t cr_lh = scaled_128_5;
      cr_lh = vmlal_high_laneq_u16(cr_lh, r_l, consts, 5);
      cr_lh = vmlsl_high_laneq_u16(cr_lh, g_l, consts, 6);
      cr_lh = vmlsl_high_laneq_u16(cr_lh, b_l, consts, 7);
      uint32x4_t cr_hl = scaled_128_5;
      cr_hl = vmlal_laneq_u16(cr_hl, vget_low_u16(r_h), consts, 5);
      cr_hl = vmlsl_laneq_u16(cr_hl, vget_low_u16(g_h), consts, 6);
      cr_hl = vmlsl_laneq_u16(cr_hl, vget_low_u16(b_h), consts, 7);
      uint32x4_t cr_hh = scaled_128_5;
      cr_hh = vmlal_high_laneq_u16(cr_hh, r_h, consts, 5);
      cr_hh = vmlsl_high_laneq_u16(cr_hh, g_h, consts, 6);
      cr_hh = vmlsl_high_laneq_u16(cr_hh, b_h, consts, 7);

      /* Descale Y values (rounding right shift) and narrow to 16-bit. */
      uint16x8_t y_l = vcombine_u16(vrshrn_n_u32(y_ll, 16),
                                    vrshrn_n_u32(y_lh, 16));
      uint16x8_t y_h = vcombine_u16(vrshrn_n_u32(y_hl, 16),
                                    vrshrn_n_u32(y_hh, 16));
      /* Descale Cb values (right shift) and narrow to 16-bit. */
      uint16x8_t cb_l = vcombine_u16(vshrn_n_u32(cb_ll, 16),
                                     vshrn_n_u32(cb_lh, 16));
      uint16x8_t cb_h = vcombine_u16(vshrn_n_u32(cb_hl, 16),
                                     vshrn_n_u32(cb_hh, 16));
      /* Descale Cr values (right shift) and narrow to 16-bit. */
      uint16x8_t cr_l = vcombine_u16(vshrn_n_u32(cr_ll, 16),
                                     vshrn_n_u32(cr_lh, 16));
      uint16x8_t cr_h = vcombine_u16(vshrn_n_u32(cr_hl, 16),
                                     vshrn_n_u32(cr_hh, 16));
      /* Narrow Y, Cb and Cr values to 8-bit and store to memory.  Buffer
       * overwrite is permitted up to the next multiple of ALIGN_SIZE bytes.
       */
      vst1q_u8(outptr0, vcombine_u8(vmovn_u16(y_l), vmovn_u16(y_h)));
      vst1q_u8(outptr1, vcombine_u8(vmovn_u16(cb_l), vmovn_u16(cb_h)));
      vst1q_u8(outptr2, vcombine_u8(vmovn_u16(cr_l), vmovn_u16(cr_h)));

    } else if (cols_remaining > 0) {
      /* To prevent buffer overread by the vector load instructions, the
       * last (image_width % 8) columns of data are first memcopied to a
       * temporary buffer large enough to accommodate the vector load.
       */
      ALIGN(16) uint8_t tmp_buf[8 * RGB_PIXELSIZE];
      memcpy(tmp_buf, inptr, cols_remaining * RGB_PIXELSIZE);
      inptr = tmp_buf;

#if RGB_PIXELSIZE == 4
      uint8x8x4_t input_pixels = vld4_u8(inptr);
#else
      uint8x8x3_t input_pixels = vld3_u8(inptr);
#endif
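      /* This tail handles at most 8 pixels, so the 64-bit (double-register)
       * load and widen forms suffice: one uint16x8_t per channel holds all
       * 8 widened samples.
       */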
      uint16x8_t r = vmovl_u8(input_pixels.val[RGB_RED]);
      uint16x8_t g = vmovl_u8(input_pixels.val[RGB_GREEN]);
      uint16x8_t b = vmovl_u8(input_pixels.val[RGB_BLUE]);

      /* Compute Y = 0.29900 * R + 0.58700 * G + 0.11400 * B */
      uint32x4_t y_l = vmull_laneq_u16(vget_low_u16(r), consts, 0);
      y_l = vmlal_laneq_u16(y_l, vget_low_u16(g), consts, 1);
      y_l = vmlal_laneq_u16(y_l, vget_low_u16(b), consts, 2);
      uint32x4_t y_h = vmull_high_laneq_u16(r, consts, 0);
      y_h = vmlal_high_laneq_u16(y_h, g, consts, 1);
      y_h = vmlal_high_laneq_u16(y_h, b, consts, 2);

      /* Compute Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B  + 128 */
      uint32x4_t cb_l = scaled_128_5;
      cb_l = vmlsl_laneq_u16(cb_l, vget_low_u16(r), consts, 3);
      cb_l = vmlsl_laneq_u16(cb_l, vget_low_u16(g), consts, 4);
      cb_l = vmlal_laneq_u16(cb_l, vget_low_u16(b), consts, 5);
      uint32x4_t cb_h = scaled_128_5;
      cb_h = vmlsl_high_laneq_u16(cb_h, r, consts, 3);
      cb_h = vmlsl_high_laneq_u16(cb_h, g, consts, 4);
      cb_h = vmlal_high_laneq_u16(cb_h, b, consts, 5);

      /* Compute Cr = 0.50000 * R - 0.41869 * G - 0.08131 * B  + 128 */
      uint32x4_t cr_l = scaled_128_5;
      cr_l = vmlal_laneq_u16(cr_l, vget_low_u16(r), consts, 5);
      cr_l = vmlsl_laneq_u16(cr_l, vget_low_u16(g), consts, 6);
      cr_l = vmlsl_laneq_u16(cr_l, vget_low_u16(b), consts, 7);
      uint32x4_t cr_h = scaled_128_5;
      cr_h = vmlal_high_laneq_u16(cr_h, r, consts, 5);
      cr_h = vmlsl_high_laneq_u16(cr_h, g, consts, 6);
      cr_h = vmlsl_high_laneq_u16(cr_h, b, consts, 7);

      /* Descale Y values (rounding right shift) and narrow to 16-bit. */
      uint16x8_t y_u16 = vcombine_u16(vrshrn_n_u32(y_l, 16),
                                      vrshrn_n_u32(y_h, 16));
      /* Descale Cb values (right shift) and narrow to 16-bit. */
      uint16x8_t cb_u16 = vcombine_u16(vshrn_n_u32(cb_l, 16),
                                       vshrn_n_u32(cb_h, 16));
      /* Descale Cr values (right shift) and narrow to 16-bit. */
      uint16x8_t cr_u16 = vcombine_u16(vshrn_n_u32(cr_l, 16),
                                       vshrn_n_u32(cr_h, 16));
      /* Narrow Y, Cb and Cr values to 8-bit and store to memory.  Buffer
       * overwrite is permitted up to the next multiple of ALIGN_SIZE bytes.
       */
      vst1_u8(outptr0, vmovn_u16(y_u16));
      vst1_u8(outptr1, vmovn_u16(cb_u16));
      vst1_u8(outptr2, vmovn_u16(cr_u16));
    }
  }
}