/*
 *  Copyright 2016 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>

#include "libyuv/scale_row.h"

// This module is for GCC MSA
#if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)
#include "libyuv/macros_msa.h"

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

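// Gathers four elements of |srcp| at the four indices held in the lanes of
// vector |indx0| and writes them to the four lanes of vector |out0|.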
#define LOAD_INDEXED_DATA(srcp, indx0, out0) \
  {                                          \
    out0[0] = srcp[indx0[0]];                \
    out0[1] = srcp[indx0[1]];                \
    out0[2] = srcp[indx0[2]];                \
    out0[3] = srcp[indx0[3]];                \
  }

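// Point-samples an ARGB row down by two: keeps the second (odd) pixel of
// each horizontal pair. Produces 4 output pixels (16 bytes) per iteration.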
void ScaleARGBRowDown2_MSA(const uint8_t* src_argb,
                           ptrdiff_t src_stride,
                           uint8_t* dst_argb,
                           int dst_width) {
  int x;
  v16u8 src0, src1, dst0;
  (void)src_stride;

  for (x = 0; x < dst_width; x += 4) {
    src0 = (v16u8)__msa_ld_b((v16i8*)src_argb, 0);
    src1 = (v16u8)__msa_ld_b((v16i8*)src_argb, 16);
    dst0 = (v16u8)__msa_pckod_w((v4i32)src1, (v4i32)src0);
    ST_UB(dst0, dst_argb);
    src_argb += 32;
    dst_argb += 16;
  }
}

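// Halves an ARGB row with horizontal filtering: each output pixel is the
// rounded per-channel average of a horizontal pixel pair.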
void ScaleARGBRowDown2Linear_MSA(const uint8_t* src_argb,
                                 ptrdiff_t src_stride,
                                 uint8_t* dst_argb,
                                 int dst_width) {
  int x;
  v16u8 src0, src1, vec0, vec1, dst0;
  (void)src_stride;

  for (x = 0; x < dst_width; x += 4) {
    src0 = (v16u8)__msa_ld_b((v16i8*)src_argb, 0);
    src1 = (v16u8)__msa_ld_b((v16i8*)src_argb, 16);
    vec0 = (v16u8)__msa_pckev_w((v4i32)src1, (v4i32)src0);
    vec1 = (v16u8)__msa_pckod_w((v4i32)src1, (v4i32)src0);
    dst0 = (v16u8)__msa_aver_u_b((v16u8)vec0, (v16u8)vec1);
    ST_UB(dst0, dst_argb);
    src_argb += 32;
    dst_argb += 16;
  }
}

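// Halves an ARGB row with a full 2x2 box filter: the shuffle interleaves
// the channels of each pixel pair so horizontal adds, a cross-row sum and a
// rounding shift by 2 average four source pixels per output pixel.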
void ScaleARGBRowDown2Box_MSA(const uint8_t* src_argb,
                              ptrdiff_t src_stride,
                              uint8_t* dst_argb,
                              int dst_width) {
  int x;
  const uint8_t* s = src_argb;
  const uint8_t* t = src_argb + src_stride;
  v16u8 src0, src1, src2, src3, vec0, vec1, vec2, vec3, dst0;
  v8u16 reg0, reg1, reg2, reg3;
  v16i8 shuffler = {0, 4, 1, 5, 2, 6, 3, 7, 8, 12, 9, 13, 10, 14, 11, 15};

  for (x = 0; x < dst_width; x += 4) {
    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
    src2 = (v16u8)__msa_ld_b((v16i8*)t, 0);
    src3 = (v16u8)__msa_ld_b((v16i8*)t, 16);
    vec0 = (v16u8)__msa_vshf_b(shuffler, (v16i8)src0, (v16i8)src0);
    vec1 = (v16u8)__msa_vshf_b(shuffler, (v16i8)src1, (v16i8)src1);
    vec2 = (v16u8)__msa_vshf_b(shuffler, (v16i8)src2, (v16i8)src2);
    vec3 = (v16u8)__msa_vshf_b(shuffler, (v16i8)src3, (v16i8)src3);
    reg0 = __msa_hadd_u_h(vec0, vec0);
    reg1 = __msa_hadd_u_h(vec1, vec1);
    reg2 = __msa_hadd_u_h(vec2, vec2);
    reg3 = __msa_hadd_u_h(vec3, vec3);
    reg0 += reg2;
    reg1 += reg3;
    reg0 = (v8u16)__msa_srari_h((v8i16)reg0, 2);
    reg1 = (v8u16)__msa_srari_h((v8i16)reg1, 2);
    dst0 = (v16u8)__msa_pckev_b((v16i8)reg1, (v16i8)reg0);
    ST_UB(dst0, dst_argb);
    s += 32;
    t += 32;
    dst_argb += 16;
  }
}

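// Point-samples ARGB pixels at a horizontal step of |src_stepx| pixels,
// copying four selected pixels per iteration with scalar word loads/stores.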
void ScaleARGBRowDownEven_MSA(const uint8_t* src_argb,
                              ptrdiff_t src_stride,
                              int32_t src_stepx,
                              uint8_t* dst_argb,
                              int dst_width) {
  int x;
  int32_t stepx = src_stepx * 4;
  int32_t data0, data1, data2, data3;
  (void)src_stride;

  for (x = 0; x < dst_width; x += 4) {
    data0 = LW(src_argb);
    data1 = LW(src_argb + stepx);
    data2 = LW(src_argb + stepx * 2);
    data3 = LW(src_argb + stepx * 3);
    SW(data0, dst_argb);
    SW(data1, dst_argb + 4);
    SW(data2, dst_argb + 8);
    SW(data3, dst_argb + 12);
    src_argb += stepx * 4;
    dst_argb += 16;
  }
}

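// Like ScaleARGBRowDownEven_MSA, but each output pixel is the rounded 2x2
// box average of the pixel pair at the sampled position in two adjacent
// rows.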
void ScaleARGBRowDownEvenBox_MSA(const uint8_t* src_argb,
                                 ptrdiff_t src_stride,
                                 int src_stepx,
                                 uint8_t* dst_argb,
                                 int dst_width) {
  int x;
  const uint8_t* nxt_argb = src_argb + src_stride;
  int32_t stepx = src_stepx * 4;
  int64_t data0, data1, data2, data3;
  v16u8 src0 = {0}, src1 = {0}, src2 = {0}, src3 = {0};
  v16u8 vec0, vec1, vec2, vec3;
  v8u16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
  v16u8 dst0;

  for (x = 0; x < dst_width; x += 4) {
    data0 = LD(src_argb);
    data1 = LD(src_argb + stepx);
    data2 = LD(src_argb + stepx * 2);
    data3 = LD(src_argb + stepx * 3);
    src0 = (v16u8)__msa_insert_d((v2i64)src0, 0, data0);
    src0 = (v16u8)__msa_insert_d((v2i64)src0, 1, data1);
    src1 = (v16u8)__msa_insert_d((v2i64)src1, 0, data2);
    src1 = (v16u8)__msa_insert_d((v2i64)src1, 1, data3);
    data0 = LD(nxt_argb);
    data1 = LD(nxt_argb + stepx);
    data2 = LD(nxt_argb + stepx * 2);
    data3 = LD(nxt_argb + stepx * 3);
    src2 = (v16u8)__msa_insert_d((v2i64)src2, 0, data0);
    src2 = (v16u8)__msa_insert_d((v2i64)src2, 1, data1);
    src3 = (v16u8)__msa_insert_d((v2i64)src3, 0, data2);
    src3 = (v16u8)__msa_insert_d((v2i64)src3, 1, data3);
    vec0 = (v16u8)__msa_ilvr_b((v16i8)src2, (v16i8)src0);
    vec1 = (v16u8)__msa_ilvr_b((v16i8)src3, (v16i8)src1);
    vec2 = (v16u8)__msa_ilvl_b((v16i8)src2, (v16i8)src0);
    vec3 = (v16u8)__msa_ilvl_b((v16i8)src3, (v16i8)src1);
    reg0 = __msa_hadd_u_h(vec0, vec0);
    reg1 = __msa_hadd_u_h(vec1, vec1);
    reg2 = __msa_hadd_u_h(vec2, vec2);
    reg3 = __msa_hadd_u_h(vec3, vec3);
    reg4 = (v8u16)__msa_pckev_d((v2i64)reg2, (v2i64)reg0);
    reg5 = (v8u16)__msa_pckev_d((v2i64)reg3, (v2i64)reg1);
    reg6 = (v8u16)__msa_pckod_d((v2i64)reg2, (v2i64)reg0);
    reg7 = (v8u16)__msa_pckod_d((v2i64)reg3, (v2i64)reg1);
    reg4 += reg6;
    reg5 += reg7;
    reg4 = (v8u16)__msa_srari_h((v8i16)reg4, 2);
    reg5 = (v8u16)__msa_srari_h((v8i16)reg5, 2);
    dst0 = (v16u8)__msa_pckev_b((v16i8)reg5, (v16i8)reg4);
    ST_UB(dst0, dst_argb);
    src_argb += stepx * 4;
    nxt_argb += stepx * 4;
    dst_argb += 16;
  }
}

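// Point-samples a planar row down by two: keeps the odd byte of each pair.
// Produces 32 output bytes per iteration.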
void ScaleRowDown2_MSA(const uint8_t* src_ptr,
                       ptrdiff_t src_stride,
                       uint8_t* dst,
                       int dst_width) {
  int x;
  v16u8 src0, src1, src2, src3, dst0, dst1;
  (void)src_stride;

  for (x = 0; x < dst_width; x += 32) {
    src0 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 0);
    src1 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 16);
    src2 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 32);
    src3 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 48);
    dst0 = (v16u8)__msa_pckod_b((v16i8)src1, (v16i8)src0);
    dst1 = (v16u8)__msa_pckod_b((v16i8)src3, (v16i8)src2);
    ST_UB2(dst0, dst1, dst, 16);
    src_ptr += 64;
    dst += 32;
  }
}

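// Halves a planar row with horizontal filtering: each output byte is the
// rounded average of two adjacent source bytes.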
void ScaleRowDown2Linear_MSA(const uint8_t* src_ptr,
                             ptrdiff_t src_stride,
                             uint8_t* dst,
                             int dst_width) {
  int x;
  v16u8 src0, src1, src2, src3, vec0, vec1, vec2, vec3, dst0, dst1;
  (void)src_stride;

  for (x = 0; x < dst_width; x += 32) {
    src0 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 0);
    src1 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 16);
    src2 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 32);
    src3 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 48);
    vec0 = (v16u8)__msa_pckev_b((v16i8)src1, (v16i8)src0);
    vec2 = (v16u8)__msa_pckev_b((v16i8)src3, (v16i8)src2);
    vec1 = (v16u8)__msa_pckod_b((v16i8)src1, (v16i8)src0);
    vec3 = (v16u8)__msa_pckod_b((v16i8)src3, (v16i8)src2);
    dst0 = __msa_aver_u_b(vec1, vec0);
    dst1 = __msa_aver_u_b(vec3, vec2);
    ST_UB2(dst0, dst1, dst, 16);
    src_ptr += 64;
    dst += 32;
  }
}

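// Halves a planar row with a 2x2 box filter: horizontal pair sums from two
// adjacent rows are combined and rounded (shift by 2).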
void ScaleRowDown2Box_MSA(const uint8_t* src_ptr,
                          ptrdiff_t src_stride,
                          uint8_t* dst,
                          int dst_width) {
  int x;
  const uint8_t* s = src_ptr;
  const uint8_t* t = src_ptr + src_stride;
  v16u8 src0, src1, src2, src3, src4, src5, src6, src7, dst0, dst1;
  v8u16 vec0, vec1, vec2, vec3;

  for (x = 0; x < dst_width; x += 32) {
    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
    src2 = (v16u8)__msa_ld_b((v16i8*)s, 32);
    src3 = (v16u8)__msa_ld_b((v16i8*)s, 48);
    src4 = (v16u8)__msa_ld_b((v16i8*)t, 0);
    src5 = (v16u8)__msa_ld_b((v16i8*)t, 16);
    src6 = (v16u8)__msa_ld_b((v16i8*)t, 32);
    src7 = (v16u8)__msa_ld_b((v16i8*)t, 48);
    vec0 = __msa_hadd_u_h(src0, src0);
    vec1 = __msa_hadd_u_h(src1, src1);
    vec2 = __msa_hadd_u_h(src2, src2);
    vec3 = __msa_hadd_u_h(src3, src3);
    vec0 += __msa_hadd_u_h(src4, src4);
    vec1 += __msa_hadd_u_h(src5, src5);
    vec2 += __msa_hadd_u_h(src6, src6);
    vec3 += __msa_hadd_u_h(src7, src7);
    vec0 = (v8u16)__msa_srari_h((v8i16)vec0, 2);
    vec1 = (v8u16)__msa_srari_h((v8i16)vec1, 2);
    vec2 = (v8u16)__msa_srari_h((v8i16)vec2, 2);
    vec3 = (v8u16)__msa_srari_h((v8i16)vec3, 2);
    dst0 = (v16u8)__msa_pckev_b((v16i8)vec1, (v16i8)vec0);
    dst1 = (v16u8)__msa_pckev_b((v16i8)vec3, (v16i8)vec2);
    ST_UB2(dst0, dst1, dst, 16);
    s += 64;
    t += 64;
    dst += 32;
  }
}

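// Point-samples a planar row down by four: two packing passes keep the byte
// at offset 2 of every group of four source bytes.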
void ScaleRowDown4_MSA(const uint8_t* src_ptr,
                       ptrdiff_t src_stride,
                       uint8_t* dst,
                       int dst_width) {
  int x;
  v16u8 src0, src1, src2, src3, vec0, vec1, dst0;
  (void)src_stride;

  for (x = 0; x < dst_width; x += 16) {
    src0 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 0);
    src1 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 16);
    src2 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 32);
    src3 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 48);
    vec0 = (v16u8)__msa_pckev_b((v16i8)src1, (v16i8)src0);
    vec1 = (v16u8)__msa_pckev_b((v16i8)src3, (v16i8)src2);
    dst0 = (v16u8)__msa_pckod_b((v16i8)vec1, (v16i8)vec0);
    ST_UB(dst0, dst);
    src_ptr += 64;
    dst += 16;
  }
}

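// Scales a planar row down by four with a 4x4 box filter: sixteen source
// bytes from four rows are summed per output byte, then rounded (shift by
// 4).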
void ScaleRowDown4Box_MSA(const uint8_t* src_ptr,
                          ptrdiff_t src_stride,
                          uint8_t* dst,
                          int dst_width) {
  int x;
  const uint8_t* s = src_ptr;
  const uint8_t* t0 = s + src_stride;
  const uint8_t* t1 = s + src_stride * 2;
  const uint8_t* t2 = s + src_stride * 3;
  v16u8 src0, src1, src2, src3, src4, src5, src6, src7, dst0;
  v8u16 vec0, vec1, vec2, vec3;
  v4u32 reg0, reg1, reg2, reg3;

  for (x = 0; x < dst_width; x += 16) {
    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
    src2 = (v16u8)__msa_ld_b((v16i8*)s, 32);
    src3 = (v16u8)__msa_ld_b((v16i8*)s, 48);
    src4 = (v16u8)__msa_ld_b((v16i8*)t0, 0);
    src5 = (v16u8)__msa_ld_b((v16i8*)t0, 16);
    src6 = (v16u8)__msa_ld_b((v16i8*)t0, 32);
    src7 = (v16u8)__msa_ld_b((v16i8*)t0, 48);
    vec0 = __msa_hadd_u_h(src0, src0);
    vec1 = __msa_hadd_u_h(src1, src1);
    vec2 = __msa_hadd_u_h(src2, src2);
    vec3 = __msa_hadd_u_h(src3, src3);
    vec0 += __msa_hadd_u_h(src4, src4);
    vec1 += __msa_hadd_u_h(src5, src5);
    vec2 += __msa_hadd_u_h(src6, src6);
    vec3 += __msa_hadd_u_h(src7, src7);
    src0 = (v16u8)__msa_ld_b((v16i8*)t1, 0);
    src1 = (v16u8)__msa_ld_b((v16i8*)t1, 16);
    src2 = (v16u8)__msa_ld_b((v16i8*)t1, 32);
    src3 = (v16u8)__msa_ld_b((v16i8*)t1, 48);
    src4 = (v16u8)__msa_ld_b((v16i8*)t2, 0);
    src5 = (v16u8)__msa_ld_b((v16i8*)t2, 16);
    src6 = (v16u8)__msa_ld_b((v16i8*)t2, 32);
    src7 = (v16u8)__msa_ld_b((v16i8*)t2, 48);
    vec0 += __msa_hadd_u_h(src0, src0);
    vec1 += __msa_hadd_u_h(src1, src1);
    vec2 += __msa_hadd_u_h(src2, src2);
    vec3 += __msa_hadd_u_h(src3, src3);
    vec0 += __msa_hadd_u_h(src4, src4);
    vec1 += __msa_hadd_u_h(src5, src5);
    vec2 += __msa_hadd_u_h(src6, src6);
    vec3 += __msa_hadd_u_h(src7, src7);
    reg0 = __msa_hadd_u_w(vec0, vec0);
    reg1 = __msa_hadd_u_w(vec1, vec1);
    reg2 = __msa_hadd_u_w(vec2, vec2);
    reg3 = __msa_hadd_u_w(vec3, vec3);
    reg0 = (v4u32)__msa_srari_w((v4i32)reg0, 4);
    reg1 = (v4u32)__msa_srari_w((v4i32)reg1, 4);
    reg2 = (v4u32)__msa_srari_w((v4i32)reg2, 4);
    reg3 = (v4u32)__msa_srari_w((v4i32)reg3, 4);
    vec0 = (v8u16)__msa_pckev_h((v8i16)reg1, (v8i16)reg0);
    vec1 = (v8u16)__msa_pckev_h((v8i16)reg3, (v8i16)reg2);
    dst0 = (v16u8)__msa_pckev_b((v16i8)vec1, (v16i8)vec0);
    ST_UB(dst0, dst);
    s += 64;
    t0 += 64;
    t1 += 64;
    t2 += 64;
    dst += 16;
  }
}

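// Scales a planar row to 3/8 width by point sampling: the shuffle mask
// picks 12 of every 32 source bytes, stored as an 8-byte and a 4-byte
// chunk.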
void ScaleRowDown38_MSA(const uint8_t* src_ptr,
                        ptrdiff_t src_stride,
                        uint8_t* dst,
                        int dst_width) {
  int x, width;
  uint64_t dst0;
  uint32_t dst1;
  v16u8 src0, src1, vec0;
  v16i8 mask = {0, 3, 6, 8, 11, 14, 16, 19, 22, 24, 27, 30, 0, 0, 0, 0};
  (void)src_stride;

  assert(dst_width % 3 == 0);
  width = dst_width / 3;

  for (x = 0; x < width; x += 4) {
    src0 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 0);
    src1 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 16);
    vec0 = (v16u8)__msa_vshf_b(mask, (v16i8)src1, (v16i8)src0);
    dst0 = __msa_copy_u_d((v2i64)vec0, 0);
    dst1 = __msa_copy_u_w((v4i32)vec0, 2);
    SD(dst0, dst);
    SW(dst1, dst + 8);
    src_ptr += 32;
    dst += 12;
  }
}

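// Scales to 3/8 width with a box filter over two source rows. Most output
// bytes average a 3x2 block, every third one a 2x2 block; 0x2AAA and 0x4000
// are 1/6 and 1/4 in 16-bit fixed point, applied before the shift by 16.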
void ScaleRowDown38_2_Box_MSA(const uint8_t* src_ptr,
                              ptrdiff_t src_stride,
                              uint8_t* dst_ptr,
                              int dst_width) {
  int x, width;
  const uint8_t* s = src_ptr;
  const uint8_t* t = src_ptr + src_stride;
  uint64_t dst0;
  uint32_t dst1;
  v16u8 src0, src1, src2, src3, out;
  v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
  v4u32 tmp0, tmp1, tmp2, tmp3, tmp4;
  v8i16 zero = {0};
  v8i16 mask = {0, 1, 2, 8, 3, 4, 5, 9};
  v16i8 dst_mask = {0, 2, 16, 4, 6, 18, 8, 10, 20, 12, 14, 22, 0, 0, 0, 0};
  v4u32 const_0x2AAA = (v4u32)__msa_fill_w(0x2AAA);
  v4u32 const_0x4000 = (v4u32)__msa_fill_w(0x4000);

  assert((dst_width % 3 == 0) && (dst_width > 0));
  width = dst_width / 3;

  for (x = 0; x < width; x += 4) {
    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
    src2 = (v16u8)__msa_ld_b((v16i8*)t, 0);
    src3 = (v16u8)__msa_ld_b((v16i8*)t, 16);
    vec0 = (v8u16)__msa_ilvr_b((v16i8)src2, (v16i8)src0);
    vec1 = (v8u16)__msa_ilvl_b((v16i8)src2, (v16i8)src0);
    vec2 = (v8u16)__msa_ilvr_b((v16i8)src3, (v16i8)src1);
    vec3 = (v8u16)__msa_ilvl_b((v16i8)src3, (v16i8)src1);
    vec0 = __msa_hadd_u_h((v16u8)vec0, (v16u8)vec0);
    vec1 = __msa_hadd_u_h((v16u8)vec1, (v16u8)vec1);
    vec2 = __msa_hadd_u_h((v16u8)vec2, (v16u8)vec2);
    vec3 = __msa_hadd_u_h((v16u8)vec3, (v16u8)vec3);
    vec4 = (v8u16)__msa_vshf_h(mask, zero, (v8i16)vec0);
    vec5 = (v8u16)__msa_vshf_h(mask, zero, (v8i16)vec1);
    vec6 = (v8u16)__msa_vshf_h(mask, zero, (v8i16)vec2);
    vec7 = (v8u16)__msa_vshf_h(mask, zero, (v8i16)vec3);
    vec0 = (v8u16)__msa_pckod_w((v4i32)vec1, (v4i32)vec0);
    vec1 = (v8u16)__msa_pckod_w((v4i32)vec3, (v4i32)vec2);
    vec0 = (v8u16)__msa_pckod_w((v4i32)vec1, (v4i32)vec0);
    tmp0 = __msa_hadd_u_w(vec4, vec4);
    tmp1 = __msa_hadd_u_w(vec5, vec5);
    tmp2 = __msa_hadd_u_w(vec6, vec6);
    tmp3 = __msa_hadd_u_w(vec7, vec7);
    tmp4 = __msa_hadd_u_w(vec0, vec0);
    vec0 = (v8u16)__msa_pckev_h((v8i16)tmp1, (v8i16)tmp0);
    vec1 = (v8u16)__msa_pckev_h((v8i16)tmp3, (v8i16)tmp2);
    tmp0 = __msa_hadd_u_w(vec0, vec0);
    tmp1 = __msa_hadd_u_w(vec1, vec1);
    tmp0 *= const_0x2AAA;
    tmp1 *= const_0x2AAA;
    tmp4 *= const_0x4000;
    tmp0 = (v4u32)__msa_srai_w((v4i32)tmp0, 16);
    tmp1 = (v4u32)__msa_srai_w((v4i32)tmp1, 16);
    tmp4 = (v4u32)__msa_srai_w((v4i32)tmp4, 16);
    vec0 = (v8u16)__msa_pckev_h((v8i16)tmp1, (v8i16)tmp0);
    vec1 = (v8u16)__msa_pckev_h((v8i16)tmp4, (v8i16)tmp4);
    out = (v16u8)__msa_vshf_b(dst_mask, (v16i8)vec1, (v16i8)vec0);
    dst0 = __msa_copy_u_d((v2i64)out, 0);
    dst1 = __msa_copy_u_w((v4i32)out, 2);
    SD(dst0, dst_ptr);
    SW(dst1, dst_ptr + 8);
    s += 32;
    t += 32;
    dst_ptr += 12;
  }
}

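// Scales to 3/8 width with a box filter over three source rows; 0x1C71 and
// 0x2AAA are 1/9 and 1/6 in 16-bit fixed point for the 3x3 and 2x3 blocks.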
void ScaleRowDown38_3_Box_MSA(const uint8_t* src_ptr,
                              ptrdiff_t src_stride,
                              uint8_t* dst_ptr,
                              int dst_width) {
  int x, width;
  const uint8_t* s = src_ptr;
  const uint8_t* t0 = s + src_stride;
  const uint8_t* t1 = s + src_stride * 2;
  uint64_t dst0;
  uint32_t dst1;
  v16u8 src0, src1, src2, src3, src4, src5, out;
  v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
  v4u32 tmp0, tmp1, tmp2, tmp3, tmp4;
  v8u16 zero = {0};
  v8i16 mask = {0, 1, 2, 8, 3, 4, 5, 9};
  v16i8 dst_mask = {0, 2, 16, 4, 6, 18, 8, 10, 20, 12, 14, 22, 0, 0, 0, 0};
  v4u32 const_0x1C71 = (v4u32)__msa_fill_w(0x1C71);
  v4u32 const_0x2AAA = (v4u32)__msa_fill_w(0x2AAA);

  assert((dst_width % 3 == 0) && (dst_width > 0));
  width = dst_width / 3;

  for (x = 0; x < width; x += 4) {
    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
    src2 = (v16u8)__msa_ld_b((v16i8*)t0, 0);
    src3 = (v16u8)__msa_ld_b((v16i8*)t0, 16);
    src4 = (v16u8)__msa_ld_b((v16i8*)t1, 0);
    src5 = (v16u8)__msa_ld_b((v16i8*)t1, 16);
    vec0 = (v8u16)__msa_ilvr_b((v16i8)src2, (v16i8)src0);
    vec1 = (v8u16)__msa_ilvl_b((v16i8)src2, (v16i8)src0);
    vec2 = (v8u16)__msa_ilvr_b((v16i8)src3, (v16i8)src1);
    vec3 = (v8u16)__msa_ilvl_b((v16i8)src3, (v16i8)src1);
    vec4 = (v8u16)__msa_ilvr_b((v16i8)zero, (v16i8)src4);
    vec5 = (v8u16)__msa_ilvl_b((v16i8)zero, (v16i8)src4);
    vec6 = (v8u16)__msa_ilvr_b((v16i8)zero, (v16i8)src5);
    vec7 = (v8u16)__msa_ilvl_b((v16i8)zero, (v16i8)src5);
    vec0 = __msa_hadd_u_h((v16u8)vec0, (v16u8)vec0);
    vec1 = __msa_hadd_u_h((v16u8)vec1, (v16u8)vec1);
    vec2 = __msa_hadd_u_h((v16u8)vec2, (v16u8)vec2);
    vec3 = __msa_hadd_u_h((v16u8)vec3, (v16u8)vec3);
    vec0 += __msa_hadd_u_h((v16u8)vec4, (v16u8)vec4);
    vec1 += __msa_hadd_u_h((v16u8)vec5, (v16u8)vec5);
    vec2 += __msa_hadd_u_h((v16u8)vec6, (v16u8)vec6);
    vec3 += __msa_hadd_u_h((v16u8)vec7, (v16u8)vec7);
    vec4 = (v8u16)__msa_vshf_h(mask, (v8i16)zero, (v8i16)vec0);
    vec5 = (v8u16)__msa_vshf_h(mask, (v8i16)zero, (v8i16)vec1);
    vec6 = (v8u16)__msa_vshf_h(mask, (v8i16)zero, (v8i16)vec2);
    vec7 = (v8u16)__msa_vshf_h(mask, (v8i16)zero, (v8i16)vec3);
    vec0 = (v8u16)__msa_pckod_w((v4i32)vec1, (v4i32)vec0);
    vec1 = (v8u16)__msa_pckod_w((v4i32)vec3, (v4i32)vec2);
    vec0 = (v8u16)__msa_pckod_w((v4i32)vec1, (v4i32)vec0);
    tmp0 = __msa_hadd_u_w(vec4, vec4);
    tmp1 = __msa_hadd_u_w(vec5, vec5);
    tmp2 = __msa_hadd_u_w(vec6, vec6);
    tmp3 = __msa_hadd_u_w(vec7, vec7);
    tmp4 = __msa_hadd_u_w(vec0, vec0);
    vec0 = (v8u16)__msa_pckev_h((v8i16)tmp1, (v8i16)tmp0);
    vec1 = (v8u16)__msa_pckev_h((v8i16)tmp3, (v8i16)tmp2);
    tmp0 = __msa_hadd_u_w(vec0, vec0);
    tmp1 = __msa_hadd_u_w(vec1, vec1);
    tmp0 *= const_0x1C71;
    tmp1 *= const_0x1C71;
    tmp4 *= const_0x2AAA;
    tmp0 = (v4u32)__msa_srai_w((v4i32)tmp0, 16);
    tmp1 = (v4u32)__msa_srai_w((v4i32)tmp1, 16);
    tmp4 = (v4u32)__msa_srai_w((v4i32)tmp4, 16);
    vec0 = (v8u16)__msa_pckev_h((v8i16)tmp1, (v8i16)tmp0);
    vec1 = (v8u16)__msa_pckev_h((v8i16)tmp4, (v8i16)tmp4);
    out = (v16u8)__msa_vshf_b(dst_mask, (v16i8)vec1, (v16i8)vec0);
    dst0 = __msa_copy_u_d((v2i64)out, 0);
    dst1 = __msa_copy_u_w((v4i32)out, 2);
    SD(dst0, dst_ptr);
    SW(dst1, dst_ptr + 8);
    s += 32;
    t0 += 32;
    t1 += 32;
    dst_ptr += 12;
  }
}

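// Adds a row of source bytes into a row of 16-bit accumulators, widening 16
// bytes per iteration.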
void ScaleAddRow_MSA(const uint8_t* src_ptr, uint16_t* dst_ptr, int src_width) {
  int x;
  v16u8 src0;
  v8u16 dst0, dst1;
  v16i8 zero = {0};

  assert(src_width > 0);

  for (x = 0; x < src_width; x += 16) {
    src0 = LD_UB(src_ptr);
    dst0 = (v8u16)__msa_ld_h((v8i16*)dst_ptr, 0);
    dst1 = (v8u16)__msa_ld_h((v8i16*)dst_ptr, 16);
    dst0 += (v8u16)__msa_ilvr_b(zero, (v16i8)src0);
    dst1 += (v8u16)__msa_ilvl_b(zero, (v16i8)src0);
    ST_UH2(dst0, dst1, dst_ptr, 8);
    src_ptr += 16;
    dst_ptr += 16;
  }
}

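// Bilinear horizontal scaling of a planar row. |x| and |dx| are 16.16
// fixed-point positions; the high 16 bits index the source and bits 15..9
// form a 7-bit fraction used to interpolate between neighboring bytes.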
void ScaleFilterCols_MSA(uint8_t* dst_ptr,
                         const uint8_t* src_ptr,
                         int dst_width,
                         int x,
                         int dx) {
  int j;
  v4i32 vec_x = __msa_fill_w(x);
  v4i32 vec_dx = __msa_fill_w(dx);
  v4i32 vec_const = {0, 1, 2, 3};
  v4i32 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
  v4i32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  v8u16 reg0, reg1;
  v16u8 dst0;
  v4i32 const_0xFFFF = __msa_fill_w(0xFFFF);
  v4i32 const_0x40 = __msa_fill_w(0x40);

  vec0 = vec_dx * vec_const;
  vec1 = vec_dx * 4;
  vec_x += vec0;

  for (j = 0; j < dst_width - 1; j += 16) {
    vec2 = vec_x >> 16;
    vec6 = vec_x & const_0xFFFF;
    vec_x += vec1;
    vec3 = vec_x >> 16;
    vec7 = vec_x & const_0xFFFF;
    vec_x += vec1;
    vec4 = vec_x >> 16;
    vec8 = vec_x & const_0xFFFF;
    vec_x += vec1;
    vec5 = vec_x >> 16;
    vec9 = vec_x & const_0xFFFF;
    vec_x += vec1;
    vec6 >>= 9;
    vec7 >>= 9;
    vec8 >>= 9;
    vec9 >>= 9;
    LOAD_INDEXED_DATA(src_ptr, vec2, tmp0);
    LOAD_INDEXED_DATA(src_ptr, vec3, tmp1);
    LOAD_INDEXED_DATA(src_ptr, vec4, tmp2);
    LOAD_INDEXED_DATA(src_ptr, vec5, tmp3);
    vec2 += 1;
    vec3 += 1;
    vec4 += 1;
    vec5 += 1;
    LOAD_INDEXED_DATA(src_ptr, vec2, tmp4);
    LOAD_INDEXED_DATA(src_ptr, vec3, tmp5);
    LOAD_INDEXED_DATA(src_ptr, vec4, tmp6);
    LOAD_INDEXED_DATA(src_ptr, vec5, tmp7);
    tmp4 -= tmp0;
    tmp5 -= tmp1;
    tmp6 -= tmp2;
    tmp7 -= tmp3;
    tmp4 *= vec6;
    tmp5 *= vec7;
    tmp6 *= vec8;
    tmp7 *= vec9;
    tmp4 += const_0x40;
    tmp5 += const_0x40;
    tmp6 += const_0x40;
    tmp7 += const_0x40;
    tmp4 >>= 7;
    tmp5 >>= 7;
    tmp6 >>= 7;
    tmp7 >>= 7;
    tmp0 += tmp4;
    tmp1 += tmp5;
    tmp2 += tmp6;
    tmp3 += tmp7;
    reg0 = (v8u16)__msa_pckev_h((v8i16)tmp1, (v8i16)tmp0);
    reg1 = (v8u16)__msa_pckev_h((v8i16)tmp3, (v8i16)tmp2);
    dst0 = (v16u8)__msa_pckev_b((v16i8)reg1, (v16i8)reg0);
    __msa_st_b(dst0, dst_ptr, 0);
    dst_ptr += 16;
  }
}

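// Point-sampled ARGB column scaling: gathers four pixels per iteration from
// 16.16 fixed-point positions.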
void ScaleARGBCols_MSA(uint8_t* dst_argb,
                       const uint8_t* src_argb,
                       int dst_width,
                       int x,
                       int dx) {
  const uint32_t* src = (const uint32_t*)(src_argb);
  uint32_t* dst = (uint32_t*)(dst_argb);
  int j;
  v4i32 x_vec = __msa_fill_w(x);
  v4i32 dx_vec = __msa_fill_w(dx);
  v4i32 const_vec = {0, 1, 2, 3};
  v4i32 vec0, vec1, vec2;
  v4i32 dst0;

  vec0 = dx_vec * const_vec;
  vec1 = dx_vec * 4;
  x_vec += vec0;

  for (j = 0; j < dst_width; j += 4) {
    vec2 = x_vec >> 16;
    x_vec += vec1;
    LOAD_INDEXED_DATA(src, vec2, dst0);
    __msa_st_w(dst0, dst, 0);
    dst += 4;
  }
}

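// Bilinear ARGB column scaling: each output pixel blends two neighboring
// source pixels using a 7-bit fraction and its complement as per-byte dot
// product weights.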
void ScaleARGBFilterCols_MSA(uint8_t* dst_argb,
                             const uint8_t* src_argb,
                             int dst_width,
                             int x,
                             int dx) {
  const uint32_t* src = (const uint32_t*)(src_argb);
  int j;
  v4u32 src0, src1, src2, src3;
  v4u32 vec0, vec1, vec2, vec3;
  v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
  v16u8 mult0, mult1, mult2, mult3;
  v8u16 tmp0, tmp1, tmp2, tmp3;
  v16u8 dst0, dst1;
  v4u32 vec_x = (v4u32)__msa_fill_w(x);
  v4u32 vec_dx = (v4u32)__msa_fill_w(dx);
  v4u32 vec_const = {0, 1, 2, 3};
  v16u8 const_0x7f = (v16u8)__msa_fill_b(0x7f);

  vec0 = vec_dx * vec_const;
  vec1 = vec_dx * 4;
  vec_x += vec0;

  for (j = 0; j < dst_width - 1; j += 8) {
    vec2 = vec_x >> 16;
    reg0 = (v16u8)(vec_x >> 9);
    vec_x += vec1;
    vec3 = vec_x >> 16;
    reg1 = (v16u8)(vec_x >> 9);
    vec_x += vec1;
    reg0 = reg0 & const_0x7f;
    reg1 = reg1 & const_0x7f;
    reg0 = (v16u8)__msa_shf_b((v16i8)reg0, 0);
    reg1 = (v16u8)__msa_shf_b((v16i8)reg1, 0);
    reg2 = reg0 ^ const_0x7f;
    reg3 = reg1 ^ const_0x7f;
    mult0 = (v16u8)__msa_ilvr_b((v16i8)reg0, (v16i8)reg2);
    mult1 = (v16u8)__msa_ilvl_b((v16i8)reg0, (v16i8)reg2);
    mult2 = (v16u8)__msa_ilvr_b((v16i8)reg1, (v16i8)reg3);
    mult3 = (v16u8)__msa_ilvl_b((v16i8)reg1, (v16i8)reg3);
    LOAD_INDEXED_DATA(src, vec2, src0);
    LOAD_INDEXED_DATA(src, vec3, src1);
    vec2 += 1;
    vec3 += 1;
    LOAD_INDEXED_DATA(src, vec2, src2);
    LOAD_INDEXED_DATA(src, vec3, src3);
    reg4 = (v16u8)__msa_ilvr_b((v16i8)src2, (v16i8)src0);
    reg5 = (v16u8)__msa_ilvl_b((v16i8)src2, (v16i8)src0);
    reg6 = (v16u8)__msa_ilvr_b((v16i8)src3, (v16i8)src1);
    reg7 = (v16u8)__msa_ilvl_b((v16i8)src3, (v16i8)src1);
    tmp0 = __msa_dotp_u_h(reg4, mult0);
    tmp1 = __msa_dotp_u_h(reg5, mult1);
    tmp2 = __msa_dotp_u_h(reg6, mult2);
    tmp3 = __msa_dotp_u_h(reg7, mult3);
    tmp0 >>= 7;
    tmp1 >>= 7;
    tmp2 >>= 7;
    tmp3 >>= 7;
    dst0 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
    dst1 = (v16u8)__msa_pckev_b((v16i8)tmp3, (v16i8)tmp2);
    __msa_st_b(dst0, dst_argb, 0);
    __msa_st_b(dst1, dst_argb, 16);
    dst_argb += 32;
  }
}

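// Scales a planar row to 3/4 width by point sampling: the shuffle masks
// drop one byte out of every four, producing 48 output bytes per iteration.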
void ScaleRowDown34_MSA(const uint8_t* src_ptr,
                        ptrdiff_t src_stride,
                        uint8_t* dst,
                        int dst_width) {
  int x;
  (void)src_stride;
  v16u8 src0, src1, src2, src3;
  v16u8 vec0, vec1, vec2;
  v16i8 mask0 = {0, 1, 3, 4, 5, 7, 8, 9, 11, 12, 13, 15, 16, 17, 19, 20};
  v16i8 mask1 = {5, 7, 8, 9, 11, 12, 13, 15, 16, 17, 19, 20, 21, 23, 24, 25};
  v16i8 mask2 = {11, 12, 13, 15, 16, 17, 19, 20,
                 21, 23, 24, 25, 27, 28, 29, 31};

  assert((dst_width % 3 == 0) && (dst_width > 0));

  for (x = 0; x < dst_width; x += 48) {
    src0 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 0);
    src1 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 16);
    src2 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 32);
    src3 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 48);
    vec0 = (v16u8)__msa_vshf_b(mask0, (v16i8)src1, (v16i8)src0);
    vec1 = (v16u8)__msa_vshf_b(mask1, (v16i8)src2, (v16i8)src1);
    vec2 = (v16u8)__msa_vshf_b(mask2, (v16i8)src3, (v16i8)src2);
    __msa_st_b((v16i8)vec0, dst, 0);
    __msa_st_b((v16i8)vec1, dst, 16);
    __msa_st_b((v16i8)vec2, dst, 32);
    src_ptr += 64;
    dst += 48;
  }
}

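// Scales to 3/4 width with filtering. Horizontal taps are formed with the
// 3,1 weight patterns in const0..const2 and normalized by the per-lane
// shift vectors, then the two source rows are blended 3:1 so the output row
// lies a quarter of the way between them.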
void ScaleRowDown34_0_Box_MSA(const uint8_t* src_ptr,
                              ptrdiff_t src_stride,
                              uint8_t* d,
                              int dst_width) {
  const uint8_t* s = src_ptr;
  const uint8_t* t = src_ptr + src_stride;
  int x;
  v16u8 src0, src1, src2, src3, src4, src5, src6, src7, dst0, dst1, dst2;
  v16u8 vec0, vec1, vec2, vec3, vec4, vec5;
  v16u8 vec6, vec7, vec8, vec9, vec10, vec11;
  v8i16 reg0, reg1, reg2, reg3, reg4, reg5;
  v8i16 reg6, reg7, reg8, reg9, reg10, reg11;
  v16u8 const0 = {3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1};
  v16u8 const1 = {1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1};
  v16u8 const2 = {1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3};
  v16i8 mask0 = {0, 1, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7, 8, 9, 9, 10};
  v16i8 mask1 = {10, 11, 12, 13, 13, 14, 14, 15,
                 16, 17, 17, 18, 18, 19, 20, 21};
  v16i8 mask2 = {5, 6, 6, 7, 8, 9, 9, 10, 10, 11, 12, 13, 13, 14, 14, 15};
  v8i16 shft0 = {2, 1, 2, 2, 1, 2, 2, 1};
  v8i16 shft1 = {2, 2, 1, 2, 2, 1, 2, 2};
  v8i16 shft2 = {1, 2, 2, 1, 2, 2, 1, 2};

  assert((dst_width % 3 == 0) && (dst_width > 0));

  for (x = 0; x < dst_width; x += 48) {
    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
    src2 = (v16u8)__msa_ld_b((v16i8*)s, 32);
    src3 = (v16u8)__msa_ld_b((v16i8*)s, 48);
    src4 = (v16u8)__msa_ld_b((v16i8*)t, 0);
    src5 = (v16u8)__msa_ld_b((v16i8*)t, 16);
    src6 = (v16u8)__msa_ld_b((v16i8*)t, 32);
    src7 = (v16u8)__msa_ld_b((v16i8*)t, 48);
    vec0 = (v16u8)__msa_vshf_b(mask0, (v16i8)src0, (v16i8)src0);
    vec1 = (v16u8)__msa_vshf_b(mask1, (v16i8)src1, (v16i8)src0);
    vec2 = (v16u8)__msa_vshf_b(mask2, (v16i8)src1, (v16i8)src1);
    vec3 = (v16u8)__msa_vshf_b(mask0, (v16i8)src2, (v16i8)src2);
    vec4 = (v16u8)__msa_vshf_b(mask1, (v16i8)src3, (v16i8)src2);
    vec5 = (v16u8)__msa_vshf_b(mask2, (v16i8)src3, (v16i8)src3);
    vec6 = (v16u8)__msa_vshf_b(mask0, (v16i8)src4, (v16i8)src4);
    vec7 = (v16u8)__msa_vshf_b(mask1, (v16i8)src5, (v16i8)src4);
    vec8 = (v16u8)__msa_vshf_b(mask2, (v16i8)src5, (v16i8)src5);
    vec9 = (v16u8)__msa_vshf_b(mask0, (v16i8)src6, (v16i8)src6);
    vec10 = (v16u8)__msa_vshf_b(mask1, (v16i8)src7, (v16i8)src6);
    vec11 = (v16u8)__msa_vshf_b(mask2, (v16i8)src7, (v16i8)src7);
    reg0 = (v8i16)__msa_dotp_u_h(vec0, const0);
    reg1 = (v8i16)__msa_dotp_u_h(vec1, const1);
    reg2 = (v8i16)__msa_dotp_u_h(vec2, const2);
    reg3 = (v8i16)__msa_dotp_u_h(vec3, const0);
    reg4 = (v8i16)__msa_dotp_u_h(vec4, const1);
    reg5 = (v8i16)__msa_dotp_u_h(vec5, const2);
    reg6 = (v8i16)__msa_dotp_u_h(vec6, const0);
    reg7 = (v8i16)__msa_dotp_u_h(vec7, const1);
    reg8 = (v8i16)__msa_dotp_u_h(vec8, const2);
    reg9 = (v8i16)__msa_dotp_u_h(vec9, const0);
    reg10 = (v8i16)__msa_dotp_u_h(vec10, const1);
    reg11 = (v8i16)__msa_dotp_u_h(vec11, const2);
    reg0 = __msa_srar_h(reg0, shft0);
    reg1 = __msa_srar_h(reg1, shft1);
    reg2 = __msa_srar_h(reg2, shft2);
    reg3 = __msa_srar_h(reg3, shft0);
    reg4 = __msa_srar_h(reg4, shft1);
    reg5 = __msa_srar_h(reg5, shft2);
    reg6 = __msa_srar_h(reg6, shft0);
    reg7 = __msa_srar_h(reg7, shft1);
    reg8 = __msa_srar_h(reg8, shft2);
    reg9 = __msa_srar_h(reg9, shft0);
    reg10 = __msa_srar_h(reg10, shft1);
    reg11 = __msa_srar_h(reg11, shft2);
    reg0 = reg0 * 3 + reg6;
    reg1 = reg1 * 3 + reg7;
    reg2 = reg2 * 3 + reg8;
    reg3 = reg3 * 3 + reg9;
    reg4 = reg4 * 3 + reg10;
    reg5 = reg5 * 3 + reg11;
    reg0 = __msa_srari_h(reg0, 2);
    reg1 = __msa_srari_h(reg1, 2);
    reg2 = __msa_srari_h(reg2, 2);
    reg3 = __msa_srari_h(reg3, 2);
    reg4 = __msa_srari_h(reg4, 2);
    reg5 = __msa_srari_h(reg5, 2);
    dst0 = (v16u8)__msa_pckev_b((v16i8)reg1, (v16i8)reg0);
    dst1 = (v16u8)__msa_pckev_b((v16i8)reg3, (v16i8)reg2);
    dst2 = (v16u8)__msa_pckev_b((v16i8)reg5, (v16i8)reg4);
    __msa_st_b((v16i8)dst0, d, 0);
    __msa_st_b((v16i8)dst1, d, 16);
    __msa_st_b((v16i8)dst2, d, 32);
    s += 64;
    t += 64;
    d += 48;
  }
}

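// Same as ScaleRowDown34_0_Box_MSA, but the two filtered source rows are
// blended equally (1:1) with a rounding shift by 1.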
void ScaleRowDown34_1_Box_MSA(const uint8_t* src_ptr,
                              ptrdiff_t src_stride,
                              uint8_t* d,
                              int dst_width) {
  const uint8_t* s = src_ptr;
  const uint8_t* t = src_ptr + src_stride;
  int x;
  v16u8 src0, src1, src2, src3, src4, src5, src6, src7, dst0, dst1, dst2;
  v16u8 vec0, vec1, vec2, vec3, vec4, vec5;
  v16u8 vec6, vec7, vec8, vec9, vec10, vec11;
  v8i16 reg0, reg1, reg2, reg3, reg4, reg5;
  v8i16 reg6, reg7, reg8, reg9, reg10, reg11;
  v16u8 const0 = {3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1};
  v16u8 const1 = {1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1};
  v16u8 const2 = {1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3};
  v16i8 mask0 = {0, 1, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7, 8, 9, 9, 10};
  v16i8 mask1 = {10, 11, 12, 13, 13, 14, 14, 15,
                 16, 17, 17, 18, 18, 19, 20, 21};
  v16i8 mask2 = {5, 6, 6, 7, 8, 9, 9, 10, 10, 11, 12, 13, 13, 14, 14, 15};
  v8i16 shft0 = {2, 1, 2, 2, 1, 2, 2, 1};
  v8i16 shft1 = {2, 2, 1, 2, 2, 1, 2, 2};
  v8i16 shft2 = {1, 2, 2, 1, 2, 2, 1, 2};

  assert((dst_width % 3 == 0) && (dst_width > 0));

  for (x = 0; x < dst_width; x += 48) {
    src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
    src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
    src2 = (v16u8)__msa_ld_b((v16i8*)s, 32);
    src3 = (v16u8)__msa_ld_b((v16i8*)s, 48);
    src4 = (v16u8)__msa_ld_b((v16i8*)t, 0);
    src5 = (v16u8)__msa_ld_b((v16i8*)t, 16);
    src6 = (v16u8)__msa_ld_b((v16i8*)t, 32);
    src7 = (v16u8)__msa_ld_b((v16i8*)t, 48);
    vec0 = (v16u8)__msa_vshf_b(mask0, (v16i8)src0, (v16i8)src0);
    vec1 = (v16u8)__msa_vshf_b(mask1, (v16i8)src1, (v16i8)src0);
    vec2 = (v16u8)__msa_vshf_b(mask2, (v16i8)src1, (v16i8)src1);
    vec3 = (v16u8)__msa_vshf_b(mask0, (v16i8)src2, (v16i8)src2);
    vec4 = (v16u8)__msa_vshf_b(mask1, (v16i8)src3, (v16i8)src2);
    vec5 = (v16u8)__msa_vshf_b(mask2, (v16i8)src3, (v16i8)src3);
    vec6 = (v16u8)__msa_vshf_b(mask0, (v16i8)src4, (v16i8)src4);
    vec7 = (v16u8)__msa_vshf_b(mask1, (v16i8)src5, (v16i8)src4);
    vec8 = (v16u8)__msa_vshf_b(mask2, (v16i8)src5, (v16i8)src5);
    vec9 = (v16u8)__msa_vshf_b(mask0, (v16i8)src6, (v16i8)src6);
    vec10 = (v16u8)__msa_vshf_b(mask1, (v16i8)src7, (v16i8)src6);
    vec11 = (v16u8)__msa_vshf_b(mask2, (v16i8)src7, (v16i8)src7);
    reg0 = (v8i16)__msa_dotp_u_h(vec0, const0);
    reg1 = (v8i16)__msa_dotp_u_h(vec1, const1);
    reg2 = (v8i16)__msa_dotp_u_h(vec2, const2);
    reg3 = (v8i16)__msa_dotp_u_h(vec3, const0);
    reg4 = (v8i16)__msa_dotp_u_h(vec4, const1);
    reg5 = (v8i16)__msa_dotp_u_h(vec5, const2);
    reg6 = (v8i16)__msa_dotp_u_h(vec6, const0);
    reg7 = (v8i16)__msa_dotp_u_h(vec7, const1);
    reg8 = (v8i16)__msa_dotp_u_h(vec8, const2);
    reg9 = (v8i16)__msa_dotp_u_h(vec9, const0);
    reg10 = (v8i16)__msa_dotp_u_h(vec10, const1);
    reg11 = (v8i16)__msa_dotp_u_h(vec11, const2);
    reg0 = __msa_srar_h(reg0, shft0);
    reg1 = __msa_srar_h(reg1, shft1);
    reg2 = __msa_srar_h(reg2, shft2);
    reg3 = __msa_srar_h(reg3, shft0);
    reg4 = __msa_srar_h(reg4, shft1);
    reg5 = __msa_srar_h(reg5, shft2);
    reg6 = __msa_srar_h(reg6, shft0);
    reg7 = __msa_srar_h(reg7, shft1);
    reg8 = __msa_srar_h(reg8, shft2);
    reg9 = __msa_srar_h(reg9, shft0);
    reg10 = __msa_srar_h(reg10, shft1);
    reg11 = __msa_srar_h(reg11, shft2);
    reg0 += reg6;
    reg1 += reg7;
    reg2 += reg8;
    reg3 += reg9;
    reg4 += reg10;
    reg5 += reg11;
    reg0 = __msa_srari_h(reg0, 1);
    reg1 = __msa_srari_h(reg1, 1);
    reg2 = __msa_srari_h(reg2, 1);
    reg3 = __msa_srari_h(reg3, 1);
    reg4 = __msa_srari_h(reg4, 1);
    reg5 = __msa_srari_h(reg5, 1);
    dst0 = (v16u8)__msa_pckev_b((v16i8)reg1, (v16i8)reg0);
    dst1 = (v16u8)__msa_pckev_b((v16i8)reg3, (v16i8)reg2);
    dst2 = (v16u8)__msa_pckev_b((v16i8)reg5, (v16i8)reg4);
    __msa_st_b((v16i8)dst0, d, 0);
    __msa_st_b((v16i8)dst1, d, 16);
    __msa_st_b((v16i8)dst2, d, 32);
    s += 64;
    t += 64;
    d += 48;
  }
}

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif

#endif  // !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)