/*
 *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <stdlib.h>
#include "./macros_msa.h"

extern const int16_t vpx_rv[];

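/* Transpose a block of 8 rows x 16 columns of unsigned bytes: in0-in7 hold
 * the rows, and each of out0-out15 receives one transposed 8-byte column in
 * its low doubleword. */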
#define VPX_TRANSPOSE8x16_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, out0,  \
                                out1, out2, out3, out4, out5, out6, out7,      \
                                out8, out9, out10, out11, out12, out13, out14, \
                                out15)                                         \
  {                                                                            \
    v8i16 temp0, temp1, temp2, temp3, temp4;                                   \
    v8i16 temp5, temp6, temp7, temp8, temp9;                                   \
                                                                               \
    ILVR_B4_SH(in1, in0, in3, in2, in5, in4, in7, in6, temp0, temp1, temp2,    \
               temp3);                                                         \
    ILVR_H2_SH(temp1, temp0, temp3, temp2, temp4, temp5);                      \
    ILVRL_W2_SH(temp5, temp4, temp6, temp7);                                   \
    ILVL_H2_SH(temp1, temp0, temp3, temp2, temp4, temp5);                      \
    ILVRL_W2_SH(temp5, temp4, temp8, temp9);                                   \
    ILVL_B4_SH(in1, in0, in3, in2, in5, in4, in7, in6, temp0, temp1, temp2,    \
               temp3);                                                         \
    ILVR_H2_SH(temp1, temp0, temp3, temp2, temp4, temp5);                      \
    ILVRL_W2_UB(temp5, temp4, out8, out10);                                    \
    ILVL_H2_SH(temp1, temp0, temp3, temp2, temp4, temp5);                      \
    ILVRL_W2_UB(temp5, temp4, out12, out14);                                   \
    out0 = (v16u8)temp6;                                                       \
    out2 = (v16u8)temp7;                                                       \
    out4 = (v16u8)temp8;                                                       \
    out6 = (v16u8)temp9;                                                       \
    out9 = (v16u8)__msa_ilvl_d((v2i64)out8, (v2i64)out8);                      \
    out11 = (v16u8)__msa_ilvl_d((v2i64)out10, (v2i64)out10);                   \
    out13 = (v16u8)__msa_ilvl_d((v2i64)out12, (v2i64)out12);                   \
    out15 = (v16u8)__msa_ilvl_d((v2i64)out14, (v2i64)out14);                   \
    out1 = (v16u8)__msa_ilvl_d((v2i64)out0, (v2i64)out0);                      \
    out3 = (v16u8)__msa_ilvl_d((v2i64)out2, (v2i64)out2);                      \
    out5 = (v16u8)__msa_ilvl_d((v2i64)out4, (v2i64)out4);                      \
    out7 = (v16u8)__msa_ilvl_d((v2i64)out6, (v2i64)out6);                      \
  }

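/* Conditional 5-tap low-pass used by both the down and across passes. The
 * filtered value is built from rounded averages, roughly the scalar
 *   k = avg(avg(above2, above1), avg(below2, below1));
 *   out = avg(src, k);                     with avg(a, b) = (a + b + 1) >> 1
 * and is kept only in lanes where |src - n| < ref for all four neighbors n;
 * elsewhere the source byte passes through unchanged (bmz). */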
#define VPX_AVER_IF_RETAIN(above2_in, above1_in, src_in, below1_in, below2_in, \
                           ref, out)                                           \
  {                                                                            \
    v16u8 temp0, temp1;                                                        \
                                                                               \
    temp1 = __msa_aver_u_b(above2_in, above1_in);                              \
    temp0 = __msa_aver_u_b(below2_in, below1_in);                              \
    temp1 = __msa_aver_u_b(temp1, temp0);                                      \
    out = __msa_aver_u_b(src_in, temp1);                                       \
    temp0 = __msa_asub_u_b(src_in, above2_in);                                 \
    temp1 = __msa_asub_u_b(src_in, above1_in);                                 \
    temp0 = (temp0 < ref);                                                     \
    temp1 = (temp1 < ref);                                                     \
    temp0 = temp0 & temp1;                                                     \
    temp1 = __msa_asub_u_b(src_in, below1_in);                                 \
    temp1 = (temp1 < ref);                                                     \
    temp0 = temp0 & temp1;                                                     \
    temp1 = __msa_asub_u_b(src_in, below2_in);                                 \
    temp1 = (temp1 < ref);                                                     \
    temp0 = temp0 & temp1;                                                     \
    out = __msa_bmz_v(out, src_in, temp0);                                     \
  }

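/* Transpose the 12 leftmost columns of a 16x16 byte block in place: in0-in15
 * hold the sixteen input rows, and in0-in11 receive the first twelve columns
 * as 16-byte rows on output. */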
#define TRANSPOSE12x16_B(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9,    \
                         in10, in11, in12, in13, in14, in15)                  \
  {                                                                           \
    v8i16 temp0, temp1, temp2, temp3, temp4;                                  \
    v8i16 temp5, temp6, temp7, temp8, temp9;                                  \
                                                                              \
    ILVR_B2_SH(in1, in0, in3, in2, temp0, temp1);                             \
    ILVRL_H2_SH(temp1, temp0, temp2, temp3);                                  \
    ILVR_B2_SH(in5, in4, in7, in6, temp0, temp1);                             \
    ILVRL_H2_SH(temp1, temp0, temp4, temp5);                                  \
    ILVRL_W2_SH(temp4, temp2, temp0, temp1);                                  \
    ILVRL_W2_SH(temp5, temp3, temp2, temp3);                                  \
    ILVR_B2_SH(in9, in8, in11, in10, temp4, temp5);                           \
    ILVRL_H2_SH(temp5, temp4, temp6, temp7);                                  \
    ILVR_B2_SH(in13, in12, in15, in14, temp4, temp5);                         \
    ILVRL_H2_SH(temp5, temp4, temp8, temp9);                                  \
    ILVRL_W2_SH(temp8, temp6, temp4, temp5);                                  \
    ILVRL_W2_SH(temp9, temp7, temp6, temp7);                                  \
    ILVL_B2_SH(in1, in0, in3, in2, temp8, temp9);                             \
    ILVR_D2_UB(temp4, temp0, temp5, temp1, in0, in2);                         \
    in1 = (v16u8)__msa_ilvl_d((v2i64)temp4, (v2i64)temp0);                    \
    in3 = (v16u8)__msa_ilvl_d((v2i64)temp5, (v2i64)temp1);                    \
    ILVL_B2_SH(in5, in4, in7, in6, temp0, temp1);                             \
    ILVR_D2_UB(temp6, temp2, temp7, temp3, in4, in6);                         \
    in5 = (v16u8)__msa_ilvl_d((v2i64)temp6, (v2i64)temp2);                    \
    in7 = (v16u8)__msa_ilvl_d((v2i64)temp7, (v2i64)temp3);                    \
    ILVL_B4_SH(in9, in8, in11, in10, in13, in12, in15, in14, temp2, temp3,    \
               temp4, temp5);                                                 \
    ILVR_H4_SH(temp9, temp8, temp1, temp0, temp3, temp2, temp5, temp4, temp6, \
               temp7, temp8, temp9);                                          \
    ILVR_W2_SH(temp7, temp6, temp9, temp8, temp0, temp1);                     \
    in8 = (v16u8)__msa_ilvr_d((v2i64)temp1, (v2i64)temp0);                    \
    in9 = (v16u8)__msa_ilvl_d((v2i64)temp1, (v2i64)temp0);                    \
    ILVL_W2_SH(temp7, temp6, temp9, temp8, temp2, temp3);                     \
    in10 = (v16u8)__msa_ilvr_d((v2i64)temp3, (v2i64)temp2);                   \
    in11 = (v16u8)__msa_ilvl_d((v2i64)temp3, (v2i64)temp2);                   \
  }

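/* Transpose an 8-row x 12-column byte tile: in0-in7 supply the rows, and
 * in0-in11 return the twelve columns, each in the low 8 bytes of a vector. */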
#define VPX_TRANSPOSE12x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, \
                                in9, in10, in11)                             \
  {                                                                          \
    v8i16 temp0, temp1, temp2, temp3;                                        \
    v8i16 temp4, temp5, temp6, temp7;                                        \
                                                                             \
    ILVR_B2_SH(in1, in0, in3, in2, temp0, temp1);                            \
    ILVRL_H2_SH(temp1, temp0, temp2, temp3);                                 \
    ILVR_B2_SH(in5, in4, in7, in6, temp0, temp1);                            \
    ILVRL_H2_SH(temp1, temp0, temp4, temp5);                                 \
    ILVRL_W2_SH(temp4, temp2, temp0, temp1);                                 \
    ILVRL_W2_SH(temp5, temp3, temp2, temp3);                                 \
    ILVL_B2_SH(in1, in0, in3, in2, temp4, temp5);                            \
    temp4 = __msa_ilvr_h(temp5, temp4);                                      \
    ILVL_B2_SH(in5, in4, in7, in6, temp6, temp7);                            \
    temp5 = __msa_ilvr_h(temp7, temp6);                                      \
    ILVRL_W2_SH(temp5, temp4, temp6, temp7);                                 \
    in0 = (v16u8)temp0;                                                      \
    in2 = (v16u8)temp1;                                                      \
    in4 = (v16u8)temp2;                                                      \
    in6 = (v16u8)temp3;                                                      \
    in8 = (v16u8)temp6;                                                      \
    in10 = (v16u8)temp7;                                                     \
    in1 = (v16u8)__msa_ilvl_d((v2i64)temp0, (v2i64)temp0);                   \
    in3 = (v16u8)__msa_ilvl_d((v2i64)temp1, (v2i64)temp1);                   \
    in5 = (v16u8)__msa_ilvl_d((v2i64)temp2, (v2i64)temp2);                   \
    in7 = (v16u8)__msa_ilvl_d((v2i64)temp3, (v2i64)temp3);                   \
    in9 = (v16u8)__msa_ilvl_d((v2i64)temp6, (v2i64)temp6);                   \
    in11 = (v16u8)__msa_ilvl_d((v2i64)temp7, (v2i64)temp7);                  \
  }

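/* Down/across post-proc filter for an 8-row (chroma) macroblock row. The
 * down pass filters vertically, 16 columns per iteration, with a trailing
 * 8-column remainder handled separately; the across pass then re-filters
 * the result horizontally on transposed tiles. */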
static void postproc_down_across_chroma_msa(uint8_t *src_ptr, uint8_t *dst_ptr,
                                            int32_t src_stride,
                                            int32_t dst_stride, int32_t cols,
                                            uint8_t *f) {
  uint8_t *p_src = src_ptr;
  uint8_t *p_dst = dst_ptr;
  uint8_t *f_orig = f;
  uint8_t *p_dst_st = dst_ptr;
  uint16_t col;
  uint64_t out0, out1, out2, out3;
  v16u8 above2, above1, below2, below1, src, ref, ref_temp;
  v16u8 inter0, inter1, inter2, inter3, inter4, inter5;
  v16u8 inter6, inter7, inter8, inter9, inter10, inter11;

  for (col = (cols / 16); col--;) {
    ref = LD_UB(f);
    LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1);
    src = LD_UB(p_src);
    LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
    above2 = LD_UB(p_src + 3 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
    above1 = LD_UB(p_src + 4 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
    src = LD_UB(p_src + 5 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
    below1 = LD_UB(p_src + 6 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
    below2 = LD_UB(p_src + 7 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
    above2 = LD_UB(p_src + 8 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
    above1 = LD_UB(p_src + 9 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
    ST_UB8(inter0, inter1, inter2, inter3, inter4, inter5, inter6, inter7,
           p_dst, dst_stride);

    p_dst += 16;
    p_src += 16;
    f += 16;
  }

  if (0 != (cols % 16)) {
    ref = LD_UB(f);
    LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1);
    src = LD_UB(p_src);
    LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
    above2 = LD_UB(p_src + 3 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
    above1 = LD_UB(p_src + 4 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
    src = LD_UB(p_src + 5 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
    below1 = LD_UB(p_src + 6 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
    below2 = LD_UB(p_src + 7 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
    above2 = LD_UB(p_src + 8 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
    above1 = LD_UB(p_src + 9 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
    out0 = __msa_copy_u_d((v2i64)inter0, 0);
    out1 = __msa_copy_u_d((v2i64)inter1, 0);
    out2 = __msa_copy_u_d((v2i64)inter2, 0);
    out3 = __msa_copy_u_d((v2i64)inter3, 0);
    SD4(out0, out1, out2, out3, p_dst, dst_stride);

    out0 = __msa_copy_u_d((v2i64)inter4, 0);
    out1 = __msa_copy_u_d((v2i64)inter5, 0);
    out2 = __msa_copy_u_d((v2i64)inter6, 0);
    out3 = __msa_copy_u_d((v2i64)inter7, 0);
    SD4(out0, out1, out2, out3, p_dst + 4 * dst_stride, dst_stride);
  }

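  /* Across pass: transpose 12x8 tiles (8 target columns plus 2 columns of
   * context on each side) so the vertical kernel can be reused, with the
   * per-column limit splatted from f. */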
  f = f_orig;
  p_dst = dst_ptr - 2;
  LD_UB8(p_dst, dst_stride, inter0, inter1, inter2, inter3, inter4, inter5,
         inter6, inter7);

  for (col = 0; col < (cols / 8); ++col) {
    ref = LD_UB(f);
    f += 8;
    VPX_TRANSPOSE12x8_UB_UB(inter0, inter1, inter2, inter3, inter4, inter5,
                            inter6, inter7, inter8, inter9, inter10, inter11);
    if (0 == col) {
      above2 = inter2;
      above1 = inter2;
    } else {
      above2 = inter0;
      above1 = inter1;
    }
    src = inter2;
    below1 = inter3;
    below2 = inter4;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 0);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter2);
    above2 = inter5;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 1);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter3);
    above1 = inter6;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 2);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter4);
    src = inter7;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 3);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref_temp, inter5);
    below1 = inter8;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 4);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref_temp, inter6);
    below2 = inter9;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 5);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter7);
    if (col == (cols / 8 - 1)) {
      above2 = inter9;
    } else {
      above2 = inter10;
    }
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 6);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter8);
    if (col == (cols / 8 - 1)) {
      above1 = inter9;
    } else {
      above1 = inter11;
    }
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 7);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter9);
    TRANSPOSE8x8_UB_UB(inter2, inter3, inter4, inter5, inter6, inter7, inter8,
                       inter9, inter2, inter3, inter4, inter5, inter6, inter7,
                       inter8, inter9);
    p_dst += 8;
    LD_UB2(p_dst, dst_stride, inter0, inter1);
    ST8x1_UB(inter2, p_dst_st);
    ST8x1_UB(inter3, (p_dst_st + 1 * dst_stride));
    LD_UB2(p_dst + 2 * dst_stride, dst_stride, inter2, inter3);
    ST8x1_UB(inter4, (p_dst_st + 2 * dst_stride));
    ST8x1_UB(inter5, (p_dst_st + 3 * dst_stride));
    LD_UB2(p_dst + 4 * dst_stride, dst_stride, inter4, inter5);
    ST8x1_UB(inter6, (p_dst_st + 4 * dst_stride));
    ST8x1_UB(inter7, (p_dst_st + 5 * dst_stride));
    LD_UB2(p_dst + 6 * dst_stride, dst_stride, inter6, inter7);
    ST8x1_UB(inter8, (p_dst_st + 6 * dst_stride));
    ST8x1_UB(inter9, (p_dst_st + 7 * dst_stride));
    p_dst_st += 8;
  }
}

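/* Down/across post-proc filter for a 16-row (luma) macroblock row; same
 * structure as the chroma version but covering 16 rows per column strip. */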
static void postproc_down_across_luma_msa(uint8_t *src_ptr, uint8_t *dst_ptr,
                                          int32_t src_stride,
                                          int32_t dst_stride, int32_t cols,
                                          uint8_t *f) {
  uint8_t *p_src = src_ptr;
  uint8_t *p_dst = dst_ptr;
  uint8_t *p_dst_st = dst_ptr;
  uint8_t *f_orig = f;
  uint16_t col;
  v16u8 above2, above1, below2, below1;
  v16u8 src, ref, ref_temp;
  v16u8 inter0, inter1, inter2, inter3, inter4, inter5, inter6;
  v16u8 inter7, inter8, inter9, inter10, inter11;
  v16u8 inter12, inter13, inter14, inter15;

  for (col = (cols / 16); col--;) {
    ref = LD_UB(f);
    LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1);
    src = LD_UB(p_src);
    LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0);
    above2 = LD_UB(p_src + 3 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1);
    above1 = LD_UB(p_src + 4 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2);
    src = LD_UB(p_src + 5 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3);
    below1 = LD_UB(p_src + 6 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4);
    below2 = LD_UB(p_src + 7 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5);
    above2 = LD_UB(p_src + 8 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6);
    above1 = LD_UB(p_src + 9 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7);
    src = LD_UB(p_src + 10 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter8);
    below1 = LD_UB(p_src + 11 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter9);
    below2 = LD_UB(p_src + 12 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter10);
    above2 = LD_UB(p_src + 13 * src_stride);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter11);
    above1 = LD_UB(p_src + 14 * src_stride);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter12);
    src = LD_UB(p_src + 15 * src_stride);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter13);
    below1 = LD_UB(p_src + 16 * src_stride);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter14);
    below2 = LD_UB(p_src + 17 * src_stride);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter15);
    ST_UB8(inter0, inter1, inter2, inter3, inter4, inter5, inter6, inter7,
           p_dst, dst_stride);
    ST_UB8(inter8, inter9, inter10, inter11, inter12, inter13, inter14,
           inter15, p_dst + 8 * dst_stride, dst_stride);
    p_src += 16;
    p_dst += 16;
    f += 16;
  }

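  /* Across pass: transpose 12 columns of the 16 filtered rows, run the same
   * kernel horizontally, then transpose the 8x16 result back. */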
  f = f_orig;
  p_dst = dst_ptr - 2;
  LD_UB8(p_dst, dst_stride, inter0, inter1, inter2, inter3, inter4, inter5,
         inter6, inter7);
  LD_UB8(p_dst + 8 * dst_stride, dst_stride, inter8, inter9, inter10, inter11,
         inter12, inter13, inter14, inter15);

  for (col = 0; col < cols / 8; ++col) {
    ref = LD_UB(f);
    f += 8;
    TRANSPOSE12x16_B(inter0, inter1, inter2, inter3, inter4, inter5, inter6,
                     inter7, inter8, inter9, inter10, inter11, inter12,
                     inter13, inter14, inter15);
    if (0 == col) {
      above2 = inter2;
      above1 = inter2;
    } else {
      above2 = inter0;
      above1 = inter1;
    }

    src = inter2;
    below1 = inter3;
    below2 = inter4;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 0);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter2);
    above2 = inter5;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 1);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter3);
    above1 = inter6;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 2);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter4);
    src = inter7;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 3);
    VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref_temp, inter5);
    below1 = inter8;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 4);
    VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref_temp, inter6);
    below2 = inter9;
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 5);
    VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter7);
    if (col == (cols / 8 - 1)) {
      above2 = inter9;
    } else {
      above2 = inter10;
    }
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 6);
    VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter8);
    if (col == (cols / 8 - 1)) {
      above1 = inter9;
    } else {
      above1 = inter11;
    }
    ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 7);
    VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter9);
    VPX_TRANSPOSE8x16_UB_UB(inter2, inter3, inter4, inter5, inter6, inter7,
                            inter8, inter9, inter2, inter3, inter4, inter5,
                            inter6, inter7, inter8, inter9, inter10, inter11,
                            inter12, inter13, inter14, inter15, above2, above1);

    p_dst += 8;
    LD_UB2(p_dst, dst_stride, inter0, inter1);
    ST8x1_UB(inter2, p_dst_st);
    ST8x1_UB(inter3, (p_dst_st + 1 * dst_stride));
    LD_UB2(p_dst + 2 * dst_stride, dst_stride, inter2, inter3);
    ST8x1_UB(inter4, (p_dst_st + 2 * dst_stride));
    ST8x1_UB(inter5, (p_dst_st + 3 * dst_stride));
    LD_UB2(p_dst + 4 * dst_stride, dst_stride, inter4, inter5);
    ST8x1_UB(inter6, (p_dst_st + 4 * dst_stride));
    ST8x1_UB(inter7, (p_dst_st + 5 * dst_stride));
    LD_UB2(p_dst + 6 * dst_stride, dst_stride, inter6, inter7);
    ST8x1_UB(inter8, (p_dst_st + 6 * dst_stride));
    ST8x1_UB(inter9, (p_dst_st + 7 * dst_stride));
    LD_UB2(p_dst + 8 * dst_stride, dst_stride, inter8, inter9);
    ST8x1_UB(inter10, (p_dst_st + 8 * dst_stride));
    ST8x1_UB(inter11, (p_dst_st + 9 * dst_stride));
    LD_UB2(p_dst + 10 * dst_stride, dst_stride, inter10, inter11);
    ST8x1_UB(inter12, (p_dst_st + 10 * dst_stride));
    ST8x1_UB(inter13, (p_dst_st + 11 * dst_stride));
    LD_UB2(p_dst + 12 * dst_stride, dst_stride, inter12, inter13);
    ST8x1_UB(inter14, (p_dst_st + 12 * dst_stride));
    ST8x1_UB(inter15, (p_dst_st + 13 * dst_stride));
    LD_UB2(p_dst + 14 * dst_stride, dst_stride, inter14, inter15);
    ST8x1_UB(above2, (p_dst_st + 14 * dst_stride));
    ST8x1_UB(above1, (p_dst_st + 15 * dst_stride));
    p_dst_st += 8;
  }
}

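/* size is 8 for chroma macroblock rows and 16 for luma macroblock rows. */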
void vpx_post_proc_down_and_across_mb_row_msa(uint8_t *src, uint8_t *dst,
                                              int32_t src_stride,
                                              int32_t dst_stride, int32_t cols,
                                              uint8_t *f, int32_t size) {
  if (8 == size) {
    postproc_down_across_chroma_msa(src, dst, src_stride, dst_stride, cols, f);
  } else if (16 == size) {
    postproc_down_across_luma_msa(src, dst, src_stride, dst_stride, cols, f);
  }
}

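/* Horizontal noise filter: for each row, slide a 15-pixel window along the
 * row, replacing a pixel with (8 + sum + pixel) >> 4 only in positions where
 * the scaled variance 15 * sum_sq - sum * sum stays below flimit. */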
void vpx_mbpost_proc_across_ip_msa(uint8_t *src_ptr, int32_t pitch,
                                   int32_t rows, int32_t cols, int32_t flimit) {
  int32_t row, col, cnt;
  uint8_t *src_dup = src_ptr;
  v16u8 src0, src, tmp_orig;
  v16u8 tmp = { 0 };
  v16i8 zero = { 0 };
  v8u16 sum_h, src_r_h, src_l_h;
  v4u32 src_r_w;
  v4i32 flimit_vec;

  flimit_vec = __msa_fill_w(flimit);
  for (row = rows; row--;) {
    int32_t sum_sq;
    int32_t sum = 0;
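    /* Replicate the left edge into the 8 bytes before the row and the right
     * edge into the 17 bytes after it, then prime sum/sum_sq over the first
     * window (s[-8..6], plus the bias of 16 used by the C reference). */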
    src0 = (v16u8)__msa_fill_b(src_dup[0]);
    ST8x1_UB(src0, (src_dup - 8));

    src0 = (v16u8)__msa_fill_b(src_dup[cols - 1]);
    ST_UB(src0, src_dup + cols);
    src_dup[cols + 16] = src_dup[cols - 1];
    tmp_orig = (v16u8)__msa_ldi_b(0);
    tmp_orig[15] = tmp[15];
    src = LD_UB(src_dup - 8);
    src[15] = 0;
    ILVRL_B2_UH(zero, src, src_r_h, src_l_h);
    src_r_w = __msa_dotp_u_w(src_r_h, src_r_h);
    src_r_w += __msa_dotp_u_w(src_l_h, src_l_h);
    sum_sq = HADD_SW_S32(src_r_w) + 16;
    sum_h = __msa_hadd_u_h(src, src);
    sum = HADD_UH_U32(sum_h);
    {
      v16u8 src7, src8, src_r, src_l;
      v16i8 mask;
      v8u16 add_r, add_l;
      v8i16 sub_r, sub_l, sum_r, sum_l, mask0, mask1;
      v4i32 sum_sq0, sum_sq1, sum_sq2, sum_sq3;
      v4i32 sub0, sub1, sub2, sub3;
      v4i32 sum0_w, sum1_w, sum2_w, sum3_w;
      v4i32 mul0, mul1, mul2, mul3;
      v4i32 total0, total1, total2, total3;
      v8i16 const8 = __msa_fill_h(8);

      src7 = LD_UB(src_dup + 7);
      src8 = LD_UB(src_dup - 8);
      for (col = 0; col < (cols >> 4); ++col) {
        ILVRL_B2_UB(src7, src8, src_r, src_l);
        HSUB_UB2_SH(src_r, src_l, sub_r, sub_l);

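        /* Serial prefix sums across the 16 lanes keep the running window sum
         * exact for every output pixel. */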
        sum_r[0] = sum + sub_r[0];
        for (cnt = 0; cnt < 7; ++cnt) {
          sum_r[cnt + 1] = sum_r[cnt] + sub_r[cnt + 1];
        }
        sum_l[0] = sum_r[7] + sub_l[0];
        for (cnt = 0; cnt < 7; ++cnt) {
          sum_l[cnt + 1] = sum_l[cnt] + sub_l[cnt + 1];
        }
        sum = sum_l[7];
        src = LD_UB(src_dup + 16 * col);
        ILVRL_B2_UH(zero, src, src_r_h, src_l_h);
        src7 = (v16u8)((const8 + sum_r + (v8i16)src_r_h) >> 4);
        src8 = (v16u8)((const8 + sum_l + (v8i16)src_l_h) >> 4);
        tmp = (v16u8)__msa_pckev_b((v16i8)src8, (v16i8)src7);

        HADD_UB2_UH(src_r, src_l, add_r, add_l);
        UNPCK_SH_SW(sub_r, sub0, sub1);
        UNPCK_SH_SW(sub_l, sub2, sub3);
        ILVR_H2_SW(zero, add_r, zero, add_l, sum0_w, sum2_w);
        ILVL_H2_SW(zero, add_r, zero, add_l, sum1_w, sum3_w);
        MUL4(sum0_w, sub0, sum1_w, sub1, sum2_w, sub2, sum3_w, sub3, mul0, mul1,
             mul2, mul3);
        sum_sq0[0] = sum_sq + mul0[0];
        for (cnt = 0; cnt < 3; ++cnt) {
          sum_sq0[cnt + 1] = sum_sq0[cnt] + mul0[cnt + 1];
        }
        sum_sq1[0] = sum_sq0[3] + mul1[0];
        for (cnt = 0; cnt < 3; ++cnt) {
          sum_sq1[cnt + 1] = sum_sq1[cnt] + mul1[cnt + 1];
        }
        sum_sq2[0] = sum_sq1[3] + mul2[0];
        for (cnt = 0; cnt < 3; ++cnt) {
          sum_sq2[cnt + 1] = sum_sq2[cnt] + mul2[cnt + 1];
        }
        sum_sq3[0] = sum_sq2[3] + mul3[0];
        for (cnt = 0; cnt < 3; ++cnt) {
          sum_sq3[cnt + 1] = sum_sq3[cnt] + mul3[cnt + 1];
        }
        sum_sq = sum_sq3[3];

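        /* 15 * sum_sq - sum * sum is the scaled variance of each window;
         * build a byte mask of lanes where it stays below flimit. */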
        UNPCK_SH_SW(sum_r, sum0_w, sum1_w);
        UNPCK_SH_SW(sum_l, sum2_w, sum3_w);
        total0 = sum_sq0 * __msa_ldi_w(15);
        total0 -= sum0_w * sum0_w;
        total1 = sum_sq1 * __msa_ldi_w(15);
        total1 -= sum1_w * sum1_w;
        total2 = sum_sq2 * __msa_ldi_w(15);
        total2 -= sum2_w * sum2_w;
        total3 = sum_sq3 * __msa_ldi_w(15);
        total3 -= sum3_w * sum3_w;
        total0 = (total0 < flimit_vec);
        total1 = (total1 < flimit_vec);
        total2 = (total2 < flimit_vec);
        total3 = (total3 < flimit_vec);
        PCKEV_H2_SH(total1, total0, total3, total2, mask0, mask1);
        mask = __msa_pckev_b((v16i8)mask1, (v16i8)mask0);
        tmp = __msa_bmz_v(tmp, src, (v16u8)mask);

        if (col == 0) {
          uint64_t src_d;

          src_d = __msa_copy_u_d((v2i64)tmp_orig, 1);
          SD(src_d, (src_dup - 8));
        }

        src7 = LD_UB(src_dup + 16 * (col + 1) + 7);
        src8 = LD_UB(src_dup + 16 * (col + 1) - 8);
        ST_UB(tmp, (src_dup + (16 * col)));
      }

      src_dup += pitch;
    }
  }
}

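/* Vertical counterpart of the across filter: per-column 15-row sliding
 * window over 16 columns at a time, with the rounding offset drawn from the
 * vpx_rv dither table instead of a constant 8. */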
void vpx_mbpost_proc_down_msa(uint8_t *dst_ptr, int32_t pitch, int32_t rows,
                              int32_t cols, int32_t flimit) {
  int32_t row, col, cnt, i;
  v4i32 flimit_vec;
  v16u8 dst7, dst8, dst_r_b, dst_l_b;
  v16i8 mask;
  v8u16 add_r, add_l;
  v8i16 dst_r_h, dst_l_h, sub_r, sub_l, mask0, mask1;
  v4i32 sub0, sub1, sub2, sub3, total0, total1, total2, total3;

  flimit_vec = __msa_fill_w(flimit);

  for (col = 0; col < (cols >> 4); ++col) {
    uint8_t *dst_tmp = &dst_ptr[col << 4];
    v16u8 dst;
    v16i8 zero = { 0 };
    v16u8 tmp[16];
    v8i16 mult0, mult1, rv2_0, rv2_1;
    v8i16 sum0_h = { 0 };
    v8i16 sum1_h = { 0 };
    v4i32 mul0 = { 0 };
    v4i32 mul1 = { 0 };
    v4i32 mul2 = { 0 };
    v4i32 mul3 = { 0 };
    v4i32 sum0_w, sum1_w, sum2_w, sum3_w;
    v4i32 add0, add1, add2, add3;
    const int16_t *rv2[16];

    dst = LD_UB(dst_tmp);
    for (i = 0; i < 16; ++i) {
      rv2[i] = vpx_rv + (i & 7);
    }
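    /* Replicate the top row into the 8 rows above the block and the bottom
     * row into the 17 rows below it. */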
    for (cnt = -8; cnt < 0; ++cnt) {
      ST_UB(dst, dst_tmp + cnt * pitch);
    }

    dst = LD_UB((dst_tmp + (rows - 1) * pitch));
    for (cnt = rows; cnt < rows + 17; ++cnt) {
      ST_UB(dst, dst_tmp + cnt * pitch);
    }
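    /* Prime the per-column sums and sums of squares over rows -8..6. */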
    for (cnt = -8; cnt <= 6; ++cnt) {
      dst = LD_UB(dst_tmp + (cnt * pitch));
      UNPCK_UB_SH(dst, dst_r_h, dst_l_h);
      MUL2(dst_r_h, dst_r_h, dst_l_h, dst_l_h, mult0, mult1);
      mul0 += (v4i32)__msa_ilvr_h((v8i16)zero, (v8i16)mult0);
      mul1 += (v4i32)__msa_ilvl_h((v8i16)zero, (v8i16)mult0);
      mul2 += (v4i32)__msa_ilvr_h((v8i16)zero, (v8i16)mult1);
      mul3 += (v4i32)__msa_ilvl_h((v8i16)zero, (v8i16)mult1);
      ADD2(sum0_h, dst_r_h, sum1_h, dst_l_h, sum0_h, sum1_h);
    }

    for (row = 0; row < (rows + 8); ++row) {
      for (i = 0; i < 8; ++i) {
        rv2_0[i] = *(rv2[i] + (row & 127));
        rv2_1[i] = *(rv2[i + 8] + (row & 127));
      }
      dst7 = LD_UB(dst_tmp + (7 * pitch));
      dst8 = LD_UB(dst_tmp - (8 * pitch));
      ILVRL_B2_UB(dst7, dst8, dst_r_b, dst_l_b);

      HSUB_UB2_SH(dst_r_b, dst_l_b, sub_r, sub_l);
      UNPCK_SH_SW(sub_r, sub0, sub1);
      UNPCK_SH_SW(sub_l, sub2, sub3);
      sum0_h += sub_r;
      sum1_h += sub_l;

      HADD_UB2_UH(dst_r_b, dst_l_b, add_r, add_l);

      ILVRL_H2_SW(zero, add_r, add0, add1);
      ILVRL_H2_SW(zero, add_l, add2, add3);
      mul0 += add0 * sub0;
      mul1 += add1 * sub1;
      mul2 += add2 * sub2;
      mul3 += add3 * sub3;
      dst = LD_UB(dst_tmp);
      ILVRL_B2_SH(zero, dst, dst_r_h, dst_l_h);
      dst7 = (v16u8)((rv2_0 + sum0_h + dst_r_h) >> 4);
      dst8 = (v16u8)((rv2_1 + sum1_h + dst_l_h) >> 4);
      tmp[row & 15] = (v16u8)__msa_pckev_b((v16i8)dst8, (v16i8)dst7);

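      /* Variance test: keep the filtered bytes only in columns where the
       * scaled variance stays below flimit. */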
      UNPCK_SH_SW(sum0_h, sum0_w, sum1_w);
      UNPCK_SH_SW(sum1_h, sum2_w, sum3_w);
      total0 = mul0 * __msa_ldi_w(15);
      total0 -= sum0_w * sum0_w;
      total1 = mul1 * __msa_ldi_w(15);
      total1 -= sum1_w * sum1_w;
      total2 = mul2 * __msa_ldi_w(15);
      total2 -= sum2_w * sum2_w;
      total3 = mul3 * __msa_ldi_w(15);
      total3 -= sum3_w * sum3_w;
      total0 = (total0 < flimit_vec);
      total1 = (total1 < flimit_vec);
      total2 = (total2 < flimit_vec);
      total3 = (total3 < flimit_vec);
      PCKEV_H2_SH(total1, total0, total3, total2, mask0, mask1);
      mask = __msa_pckev_b((v16i8)mask1, (v16i8)mask0);
      tmp[row & 15] = __msa_bmz_v(tmp[row & 15], dst, (v16u8)mask);

      if (row >= 8) {
        ST_UB(tmp[(row - 8) & 15], (dst_tmp - 8 * pitch));
      }

      dst_tmp += pitch;
    }
  }
}