/*
 * Copyright (c) 2015 - 2017 Shivraj Patil (Shivraj.Patil@imgtec.com)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>
#include "libavcodec/vp9dsp.h"
#include "libavutil/mips/generic_macros_msa.h"
#include "vp9dsp_mips.h"

#define VP9_DCT_CONST_BITS   14
#define ROUND_POWER_OF_TWO(value, n)  (((value) + (1 << ((n) - 1))) >> (n))

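/* cospi_k_64 = round(16384 * cos(k * pi / 64)): the VP9 DCT twiddle factors
 * in Q14 fixed point. Every multiply by one of these constants is followed
 * by ROUND_POWER_OF_TWO(x, VP9_DCT_CONST_BITS), i.e. a round-to-nearest
 * divide by 2^14. */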
static const int32_t cospi_1_64 = 16364;
static const int32_t cospi_2_64 = 16305;
static const int32_t cospi_3_64 = 16207;
static const int32_t cospi_4_64 = 16069;
static const int32_t cospi_5_64 = 15893;
static const int32_t cospi_6_64 = 15679;
static const int32_t cospi_7_64 = 15426;
static const int32_t cospi_8_64 = 15137;
static const int32_t cospi_9_64 = 14811;
static const int32_t cospi_10_64 = 14449;
static const int32_t cospi_11_64 = 14053;
static const int32_t cospi_12_64 = 13623;
static const int32_t cospi_13_64 = 13160;
static const int32_t cospi_14_64 = 12665;
static const int32_t cospi_15_64 = 12140;
static const int32_t cospi_16_64 = 11585;
static const int32_t cospi_17_64 = 11003;
static const int32_t cospi_18_64 = 10394;
static const int32_t cospi_19_64 = 9760;
static const int32_t cospi_20_64 = 9102;
static const int32_t cospi_21_64 = 8423;
static const int32_t cospi_22_64 = 7723;
static const int32_t cospi_23_64 = 7005;
static const int32_t cospi_24_64 = 6270;
static const int32_t cospi_25_64 = 5520;
static const int32_t cospi_26_64 = 4756;
static const int32_t cospi_27_64 = 3981;
static const int32_t cospi_28_64 = 3196;
static const int32_t cospi_29_64 = 2404;
static const int32_t cospi_30_64 = 1606;
static const int32_t cospi_31_64 = 804;

// 16384 * sqrt(2) * sin(kPi/9) * 2 / 3
static const int32_t sinpi_1_9 = 5283;
static const int32_t sinpi_2_9 = 9929;
static const int32_t sinpi_3_9 = 13377;
static const int32_t sinpi_4_9 = 15212;

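/* Rotation of a vector pair by the angle encoded in (cnst0, cnst1).
 * Per-element scalar equivalent (a sketch, assuming 16-bit lanes and
 * Q14 constants):
 *   out0[i] = ROUND_POWER_OF_TWO(reg0[i] * cnst0 - reg1[i] * cnst1,
 *                                VP9_DCT_CONST_BITS);
 *   out1[i] = ROUND_POWER_OF_TWO(reg0[i] * cnst1 + reg1[i] * cnst0,
 *                                VP9_DCT_CONST_BITS);
 */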
#define VP9_DOTP_CONST_PAIR(reg0, reg1, cnst0, cnst1, out0, out1)  \
{                                                                  \
    v8i16 k0_m = __msa_fill_h(cnst0);                              \
    v4i32 s0_m, s1_m, s2_m, s3_m;                                  \
                                                                   \
    s0_m = (v4i32) __msa_fill_h(cnst1);                            \
    k0_m = __msa_ilvev_h((v8i16) s0_m, k0_m);                      \
                                                                   \
    ILVRL_H2_SW((-reg1), reg0, s1_m, s0_m);                        \
    ILVRL_H2_SW(reg0, reg1, s3_m, s2_m);                           \
    DOTP_SH2_SW(s1_m, s0_m, k0_m, k0_m, s1_m, s0_m);               \
    SRARI_W2_SW(s1_m, s0_m, VP9_DCT_CONST_BITS);                   \
    out0 = __msa_pckev_h((v8i16) s0_m, (v8i16) s1_m);              \
                                                                   \
    DOTP_SH2_SW(s3_m, s2_m, k0_m, k0_m, s1_m, s0_m);               \
    SRARI_W2_SW(s1_m, s0_m, VP9_DCT_CONST_BITS);                   \
    out1 = __msa_pckev_h((v8i16) s0_m, (v8i16) s1_m);              \
}

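/* Two rotations evaluated together: the 32-bit dot products of in0..in3
 * with the coefficient vectors in4..in7 are combined butterfly-style
 * (sums and differences) before rounding and packing back to 16 bits,
 * so the add/sub happens at full precision. */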
#define VP9_DOT_ADD_SUB_SRARI_PCK(in0, in1, in2, in3, in4, in5, in6, in7,  \
                                  dst0, dst1, dst2, dst3)                  \
{                                                                          \
    v4i32 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m;                               \
    v4i32 tp5_m, tp6_m, tp7_m, tp8_m, tp9_m;                               \
                                                                           \
    DOTP_SH4_SW(in0, in1, in0, in1, in4, in4, in5, in5,                    \
                tp0_m, tp2_m, tp3_m, tp4_m);                               \
    DOTP_SH4_SW(in2, in3, in2, in3, in6, in6, in7, in7,                    \
                tp5_m, tp6_m, tp7_m, tp8_m);                               \
    BUTTERFLY_4(tp0_m, tp3_m, tp7_m, tp5_m, tp1_m, tp9_m, tp7_m, tp5_m);   \
    BUTTERFLY_4(tp2_m, tp4_m, tp8_m, tp6_m, tp3_m, tp0_m, tp4_m, tp2_m);   \
    SRARI_W4_SW(tp1_m, tp9_m, tp7_m, tp5_m, VP9_DCT_CONST_BITS);           \
    SRARI_W4_SW(tp3_m, tp0_m, tp4_m, tp2_m, VP9_DCT_CONST_BITS);           \
    PCKEV_H4_SH(tp1_m, tp3_m, tp9_m, tp0_m, tp7_m, tp4_m, tp5_m, tp2_m,    \
                dst0, dst1, dst2, dst3);                                   \
}

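/* Single rotation term: dot products of in0/in1 with the coefficient
 * pair in2, rounded and packed into one 16-bit result vector. */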
#define VP9_DOT_SHIFT_RIGHT_PCK_H(in0, in1, in2)          \
( {                                                       \
    v8i16 dst_m;                                          \
    v4i32 tp0_m, tp1_m;                                   \
                                                          \
    DOTP_SH2_SW(in0, in1, in2, in2, tp1_m, tp0_m);        \
    SRARI_W2_SW(tp1_m, tp0_m, VP9_DCT_CONST_BITS);        \
    dst_m = __msa_pckev_h((v8i16) tp1_m, (v8i16) tp0_m);  \
                                                          \
    dst_m;                                                \
} )

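/* 8-point inverse ADST of in0..in7 across eight 16-bit lanes; the final
 * negations give the alternating sign pattern of the VP9 iadst8. */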
#define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,                 \
                  out0, out1, out2, out3, out4, out5, out6, out7)         \
{                                                                         \
    v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m;                    \
    v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m;                     \
    v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64,  \
        cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 };             \
    v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64,              \
        -cospi_16_64, cospi_24_64, -cospi_24_64, 0, 0 };                  \
                                                                          \
    SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m);                       \
    cnst2_m = -cnst0_m;                                                   \
    ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m);    \
    SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m);                       \
    cnst4_m = -cnst2_m;                                                   \
    ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m);    \
                                                                          \
    ILVRL_H2_SH(in0, in7, vec1_m, vec0_m);                                \
    ILVRL_H2_SH(in4, in3, vec3_m, vec2_m);                                \
    VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m,    \
                              cnst1_m, cnst2_m, cnst3_m, in7, in0,        \
                              in4, in3);                                  \
                                                                          \
    SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m);                       \
    cnst2_m = -cnst0_m;                                                   \
    ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m);    \
    SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m);                       \
    cnst4_m = -cnst2_m;                                                   \
    ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m);    \
                                                                          \
    ILVRL_H2_SH(in2, in5, vec1_m, vec0_m);                                \
    ILVRL_H2_SH(in6, in1, vec3_m, vec2_m);                                \
                                                                          \
    VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m,    \
                              cnst1_m, cnst2_m, cnst3_m, in5, in2,        \
                              in6, in1);                                  \
    BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5);                \
    out7 = -s0_m;                                                         \
    out0 = s1_m;                                                          \
                                                                          \
    SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5,                                    \
                 cnst0_m, cnst1_m, cnst2_m, cnst3_m);                     \
                                                                          \
    ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m);    \
    cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m);                            \
    cnst1_m = cnst0_m;                                                    \
                                                                          \
    ILVRL_H2_SH(in4, in3, vec1_m, vec0_m);                                \
    ILVRL_H2_SH(in6, in1, vec3_m, vec2_m);                                \
    VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m,    \
                              cnst2_m, cnst3_m, cnst1_m, out1, out6,      \
                              s0_m, s1_m);                                \
                                                                          \
    SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m);                       \
    cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m);                            \
                                                                          \
    ILVRL_H2_SH(in2, in5, vec1_m, vec0_m);                                \
    ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m);                              \
    out3 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);            \
    out4 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m);            \
    out2 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m);            \
    out5 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m);            \
                                                                          \
    out1 = -out1;                                                         \
    out3 = -out3;                                                         \
    out5 = -out5;                                                         \
}

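/* Two independent rotations of the (m0, m1) pair, one with coefficient
 * vector c0 and one with c1, each rounded and packed to 16 bits. */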
#define VP9_MADD_SHORT(m0, m1, c0, c1, res0, res1)                        \
{                                                                         \
    v4i32 madd0_m, madd1_m, madd2_m, madd3_m;                             \
    v8i16 madd_s0_m, madd_s1_m;                                           \
                                                                          \
    ILVRL_H2_SH(m1, m0, madd_s0_m, madd_s1_m);                            \
    DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s0_m, madd_s1_m,               \
                c0, c0, c1, c1, madd0_m, madd1_m, madd2_m, madd3_m);      \
    SRARI_W4_SW(madd0_m, madd1_m, madd2_m, madd3_m, VP9_DCT_CONST_BITS);  \
    PCKEV_H2_SH(madd1_m, madd0_m, madd3_m, madd2_m, res0, res1);          \
}

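/* Butterfly of two rotations: the dot products of (inp0, inp1) with
 * cst0/cst1 and of (inp2, inp3) with cst2/cst3 are added and subtracted
 * at 32-bit precision before rounding, giving four 16-bit outputs. */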
#define VP9_MADD_BF(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3,       \
                    out0, out1, out2, out3)                               \
{                                                                         \
    v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m;                     \
    v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m, m4_m, m5_m;                     \
                                                                          \
    ILVRL_H2_SH(inp1, inp0, madd_s0_m, madd_s1_m);                        \
    ILVRL_H2_SH(inp3, inp2, madd_s2_m, madd_s3_m);                        \
    DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m,               \
                cst0, cst0, cst2, cst2, tmp0_m, tmp1_m, tmp2_m, tmp3_m);  \
    BUTTERFLY_4(tmp0_m, tmp1_m, tmp3_m, tmp2_m,                           \
                m4_m, m5_m, tmp3_m, tmp2_m);                              \
    SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS);          \
    PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out0, out1);                  \
    DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m,               \
                cst1, cst1, cst3, cst3, tmp0_m, tmp1_m, tmp2_m, tmp3_m);  \
    BUTTERFLY_4(tmp0_m, tmp1_m, tmp3_m, tmp2_m,                           \
                m4_m, m5_m, tmp3_m, tmp2_m);                              \
    SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS);          \
    PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out2, out3);                  \
}

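/* Builds the Q14 coefficient operand for DOTP_SH: the result lanes
 * alternate { c0, c1, c0, c1, ... }, so every 32-bit dot product sees
 * the (c0, c1) pair. */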
#define VP9_SET_COSPI_PAIR(c0_h, c1_h)   \
( {                                      \
    v8i16 out0_m, r0_m, r1_m;            \
                                         \
    r0_m = __msa_fill_h(c0_h);           \
    r1_m = __msa_fill_h(c1_h);           \
    out0_m = __msa_ilvev_h(r1_m, r0_m);  \
                                         \
    out0_m;                              \
} )

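/* Adds four rows of 16-bit residuals to four 8-pixel rows of dst,
 * clips to [0, 255] and stores the result back. */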
#define VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3)  \
{                                                                 \
    uint8_t *dst_m = (uint8_t *) (dst);                           \
    v16u8 dst0_m, dst1_m, dst2_m, dst3_m;                         \
    v16i8 tmp0_m, tmp1_m;                                         \
    v16i8 zero_m = { 0 };                                         \
    v8i16 res0_m, res1_m, res2_m, res3_m;                         \
                                                                  \
    LD_UB4(dst_m, dst_stride, dst0_m, dst1_m, dst2_m, dst3_m);    \
    ILVR_B4_SH(zero_m, dst0_m, zero_m, dst1_m, zero_m, dst2_m,    \
               zero_m, dst3_m, res0_m, res1_m, res2_m, res3_m);   \
    ADD4(res0_m, in0, res1_m, in1, res2_m, in2, res3_m, in3,      \
         res0_m, res1_m, res2_m, res3_m);                         \
    CLIP_SH4_0_255(res0_m, res1_m, res2_m, res3_m);               \
    PCKEV_B2_SB(res1_m, res0_m, res3_m, res2_m, tmp0_m, tmp1_m);  \
    ST_D4(tmp0_m, tmp1_m, 0, 1, 0, 1, dst_m, dst_stride);         \
}

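/* One 1D pass of the 4-point IDCT, applied across the vector lanes. */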
#define VP9_IDCT4x4(in0, in1, in2, in3, out0, out1, out2, out3)       \
{                                                                     \
    v8i16 c0_m, c1_m, c2_m, c3_m;                                     \
    v8i16 step0_m, step1_m;                                           \
    v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                             \
    v16i8 zeros = { 0 };                                              \
                                                                      \
    c0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);              \
    c1_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);             \
    step0_m = __msa_ilvr_h(in2, in0);                                 \
    DOTP_SH2_SW(step0_m, step0_m, c0_m, c1_m, tmp0_m, tmp1_m);        \
                                                                      \
    c2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);              \
    c3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);               \
    step1_m = __msa_ilvr_h(in3, in1);                                 \
    DOTP_SH2_SW(step1_m, step1_m, c2_m, c3_m, tmp2_m, tmp3_m);        \
    SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS);  \
                                                                      \
    PCKEV_H2_SW(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp0_m, tmp2_m);      \
    SLDI_B2_SW(zeros, tmp0_m, zeros, tmp2_m, 8, tmp1_m, tmp3_m);      \
    BUTTERFLY_4((v8i16) tmp0_m, (v8i16) tmp1_m,                       \
                (v8i16) tmp2_m, (v8i16) tmp3_m,                       \
                out0, out1, out2, out3);                              \
}

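/* One 1D pass of the 4-point inverse ADST, built from the sinpi_*
 * constants. */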
#define VP9_IADST4x4(in0, in1, in2, in3, out0, out1, out2, out3)      \
{                                                                     \
    v8i16 res0_m, res1_m, c0_m, c1_m;                                 \
    v8i16 k1_m, k2_m, k3_m, k4_m;                                     \
    v8i16 zero_m = { 0 };                                             \
    v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                             \
    v4i32 int0_m, int1_m, int2_m, int3_m;                             \
    v8i16 mask_m = { sinpi_1_9, sinpi_2_9, sinpi_3_9,                 \
        sinpi_4_9, -sinpi_1_9, -sinpi_2_9, -sinpi_3_9,                \
        -sinpi_4_9 };                                                 \
                                                                      \
    SPLATI_H4_SH(mask_m, 3, 0, 1, 2, c0_m, c1_m, k1_m, k2_m);         \
    ILVEV_H2_SH(c0_m, c1_m, k1_m, k2_m, c0_m, c1_m);                  \
    ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m);                   \
    DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp2_m, tmp1_m);          \
    int0_m = tmp2_m + tmp1_m;                                         \
                                                                      \
    SPLATI_H2_SH(mask_m, 4, 7, k4_m, k3_m);                           \
    ILVEV_H2_SH(k4_m, k1_m, k3_m, k2_m, c0_m, c1_m);                  \
    DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp0_m, tmp1_m);          \
    int1_m = tmp0_m + tmp1_m;                                         \
                                                                      \
    c0_m = __msa_splati_h(mask_m, 6);                                 \
    ILVL_H2_SH(k2_m, c0_m, zero_m, k2_m, c0_m, c1_m);                 \
    ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m);                   \
    DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp0_m, tmp1_m);          \
    int2_m = tmp0_m + tmp1_m;                                         \
                                                                      \
    c0_m = __msa_splati_h(mask_m, 6);                                 \
    c0_m = __msa_ilvev_h(c0_m, k1_m);                                 \
                                                                      \
    res0_m = __msa_ilvr_h((in1), (in3));                              \
    tmp0_m = __msa_dotp_s_w(res0_m, c0_m);                            \
    int3_m = tmp2_m + tmp0_m;                                         \
                                                                      \
    res0_m = __msa_ilvr_h((in2), (in3));                              \
    c1_m = __msa_ilvev_h(k4_m, k3_m);                                 \
                                                                      \
    tmp2_m = __msa_dotp_s_w(res0_m, c1_m);                            \
    res1_m = __msa_ilvr_h((in0), (in2));                              \
    c1_m = __msa_ilvev_h(k1_m, zero_m);                               \
                                                                      \
    tmp3_m = __msa_dotp_s_w(res1_m, c1_m);                            \
    int3_m += tmp2_m;                                                 \
    int3_m += tmp3_m;                                                 \
                                                                      \
    SRARI_W4_SW(int0_m, int1_m, int2_m, int3_m, VP9_DCT_CONST_BITS);  \
    PCKEV_H2_SH(int0_m, int0_m, int1_m, int1_m, out0, out1);          \
    PCKEV_H2_SH(int2_m, int2_m, int3_m, int3_m, out2, out3);          \
}

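/* Transposes the low halves of eight vectors into four full rows and
 * clears rows 4..7 of the result; the caller's data has nonzero values
 * only in those low halves. */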
#define TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,          \
                           out0, out1, out2, out3, out4, out5, out6, out7)  \
{                                                                           \
    v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                   \
    v8i16 tmp0_n, tmp1_n, tmp2_n, tmp3_n;                                   \
    v8i16 zero_m = { 0 };                                                   \
                                                                            \
    ILVR_H4_SH(in1, in0, in3, in2, in5, in4, in7, in6,                      \
               tmp0_n, tmp1_n, tmp2_n, tmp3_n);                             \
    ILVRL_W2_SH(tmp1_n, tmp0_n, tmp0_m, tmp2_m);                            \
    ILVRL_W2_SH(tmp3_n, tmp2_n, tmp1_m, tmp3_m);                            \
                                                                            \
    out0 = (v8i16) __msa_ilvr_d((v2i64) tmp1_m, (v2i64) tmp0_m);            \
    out1 = (v8i16) __msa_ilvl_d((v2i64) tmp1_m, (v2i64) tmp0_m);            \
    out2 = (v8i16) __msa_ilvr_d((v2i64) tmp3_m, (v2i64) tmp2_m);            \
    out3 = (v8i16) __msa_ilvl_d((v2i64) tmp3_m, (v2i64) tmp2_m);            \
                                                                            \
    out4 = zero_m;                                                          \
    out5 = zero_m;                                                          \
    out6 = zero_m;                                                          \
    out7 = zero_m;                                                          \
}

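/* DC-only 4x4 path: for a block where only input[0] is nonzero, the two
 * 1D passes collapse to two multiplies by cospi_16_64; the resulting
 * constant is rounded (add 2^3, divide by 2^4) and added to the block. */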
static void vp9_idct4x4_1_add_msa(int16_t *input, uint8_t *dst,
                                  int32_t dst_stride)
{
    int16_t out;
    v8i16 vec;

    out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS);
    out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS);
    out = ROUND_POWER_OF_TWO(out, 4);
    vec = __msa_fill_h(out);
    input[0] = 0;

    ADDBLK_ST4x4_UB(vec, vec, vec, vec, dst, dst_stride);
}

static void vp9_idct4x4_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                          int32_t dst_stride)
{
    v8i16 in0, in1, in2, in3;
    v8i16 zero = { 0 };

    /* load vector elements of 4x4 block */
    in0 = LD_SH(input);
    in2 = LD_SH(input + 8);
    in1 = (v8i16) __msa_ilvl_d((v2i64) in0, (v2i64) in0);
    in3 = (v8i16) __msa_ilvl_d((v2i64) in2, (v2i64) in2);
    ST_SH2(zero, zero, input, 8);
    /* rows */
    VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* columns */
    TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
    VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* rounding (add 2^3, divide by 2^4) */
    SRARI_H4_SH(in0, in1, in2, in3, 4);
    ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride);
}

static void vp9_iadst4x4_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                           int32_t dst_stride)
{
    v8i16 in0, in1, in2, in3;
    v8i16 zero = { 0 };

    /* load vector elements of 4x4 block */
    in0 = LD_SH(input);
    in2 = LD_SH(input + 8);
    in1 = (v8i16) __msa_ilvl_d((v2i64) in0, (v2i64) in0);
    in3 = (v8i16) __msa_ilvl_d((v2i64) in2, (v2i64) in2);
    ST_SH2(zero, zero, input, 8);
    /* rows */
    VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* columns */
    TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
    VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* rounding (add 2^3, divide by 2^4) */
    SRARI_H4_SH(in0, in1, in2, in3, 4);
    ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride);
}

static void vp9_iadst_idct_4x4_add_msa(int16_t *input, uint8_t *dst,
                                       int32_t dst_stride, int32_t eob)
{
    v8i16 in0, in1, in2, in3;
    v8i16 zero = { 0 };

    /* load vector elements of 4x4 block */
    in0 = LD_SH(input);
    in2 = LD_SH(input + 8);
    in1 = (v8i16) __msa_ilvl_d((v2i64) in0, (v2i64) in0);
    in3 = (v8i16) __msa_ilvl_d((v2i64) in2, (v2i64) in2);
    ST_SH2(zero, zero, input, 8);
    /* rows */
    VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* columns */
    TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
    VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* rounding (add 2^3, divide by 2^4) */
    SRARI_H4_SH(in0, in1, in2, in3, 4);
    ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride);
}

static void vp9_idct_iadst_4x4_add_msa(int16_t *input, uint8_t *dst,
                                       int32_t dst_stride, int32_t eob)
{
    v8i16 in0, in1, in2, in3;
    v8i16 zero = { 0 };

    /* load vector elements of 4x4 block */
    in0 = LD_SH(input);
    in2 = LD_SH(input + 8);
    in1 = (v8i16) __msa_ilvl_d((v2i64) in0, (v2i64) in0);
    in3 = (v8i16) __msa_ilvl_d((v2i64) in2, (v2i64) in2);
    ST_SH2(zero, zero, input, 8);
    /* rows */
    VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* columns */
    TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
    VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* rounding (add 2^3, divide by 2^4) */
    SRARI_H4_SH(in0, in1, in2, in3, 4);
    ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride);
}

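/* Like VP9_SET_COSPI_PAIR, but splats the (idx1, idx2) lanes of an
 * existing coefficient vector instead of filling from immediates. */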
#define VP9_SET_CONST_PAIR(mask_h, idx1_h, idx2_h)     \
( {                                                    \
    v8i16 c0_m, c1_m;                                  \
                                                       \
    SPLATI_H2_SH(mask_h, idx1_h, idx2_h, c0_m, c1_m);  \
    c0_m = __msa_ilvev_h(c1_m, c0_m);                  \
                                                       \
    c0_m;                                              \
} )

/* multiply and add macro: four rounded rotations, (inp0, inp1) against
   cst0 and cst1, (inp2, inp3) against cst2 and cst3 */
#define VP9_MADD(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3,          \
                 out0, out1, out2, out3)                                  \
{                                                                         \
    v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m;                     \
    v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                 \
                                                                          \
    ILVRL_H2_SH(inp1, inp0, madd_s1_m, madd_s0_m);                        \
    ILVRL_H2_SH(inp3, inp2, madd_s3_m, madd_s2_m);                        \
    DOTP_SH4_SW(madd_s1_m, madd_s0_m, madd_s1_m, madd_s0_m,               \
                cst0, cst0, cst1, cst1, tmp0_m, tmp1_m, tmp2_m, tmp3_m);  \
    SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS);      \
    PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out1);              \
    DOTP_SH4_SW(madd_s3_m, madd_s2_m, madd_s3_m, madd_s2_m,               \
                cst2, cst2, cst3, cst3, tmp0_m, tmp1_m, tmp2_m, tmp3_m);  \
    SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS);      \
    PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out2, out3);              \
}

/* 1D idct 8x8 macro */
#define VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,                 \
                       out0, out1, out2, out3, out4, out5, out6, out7)         \
{                                                                              \
    v8i16 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m;              \
    v8i16 k0_m, k1_m, k2_m, k3_m, res0_m, res1_m, res2_m, res3_m;              \
    v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                      \
    v8i16 mask_m = { cospi_28_64, cospi_4_64, cospi_20_64, cospi_12_64,        \
       cospi_16_64, -cospi_4_64, -cospi_20_64, -cospi_16_64 };                 \
                                                                               \
    k0_m = VP9_SET_CONST_PAIR(mask_m, 0, 5);                                   \
    k1_m = VP9_SET_CONST_PAIR(mask_m, 1, 0);                                   \
    k2_m = VP9_SET_CONST_PAIR(mask_m, 6, 3);                                   \
    k3_m = VP9_SET_CONST_PAIR(mask_m, 3, 2);                                   \
    VP9_MADD(in1, in7, in3, in5, k0_m, k1_m, k2_m, k3_m, in1, in7, in3, in5);  \
    SUB2(in1, in3, in7, in5, res0_m, res1_m);                                  \
    k0_m = VP9_SET_CONST_PAIR(mask_m, 4, 7);                                   \
    k1_m = __msa_splati_h(mask_m, 4);                                          \
                                                                               \
    ILVRL_H2_SH(res0_m, res1_m, res2_m, res3_m);                               \
    DOTP_SH4_SW(res2_m, res3_m, res2_m, res3_m, k0_m, k0_m, k1_m, k1_m,        \
                tmp0_m, tmp1_m, tmp2_m, tmp3_m);                               \
    SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS);           \
    tp4_m = in1 + in3;                                                         \
    PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tp5_m, tp6_m);                 \
    tp7_m = in7 + in5;                                                         \
    k2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);                       \
    k3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);                        \
    VP9_MADD(in0, in4, in2, in6, k1_m, k0_m, k2_m, k3_m,                       \
             in0, in4, in2, in6);                                              \
    BUTTERFLY_4(in0, in4, in2, in6, tp0_m, tp1_m, tp2_m, tp3_m);               \
    BUTTERFLY_8(tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m,        \
                out0, out1, out2, out3, out4, out5, out6, out7);               \
}

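/* Another expansion of the 8-point inverse ADST, used by the mixed
 * IDCT/IADST 8x8 paths; odd-indexed outputs are negated at the end. */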
#define VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,              \
                        out0, out1, out2, out3, out4, out5, out6, out7)      \
{                                                                            \
    v4i32 r0_m, r1_m, r2_m, r3_m, r4_m, r5_m, r6_m, r7_m;                    \
    v4i32 m0_m, m1_m, m2_m, m3_m, t0_m, t1_m;                                \
    v8i16 res0_m, res1_m, res2_m, res3_m, k0_m, k1_m, in_s0, in_s1;          \
    v8i16 mask1_m = { cospi_2_64, cospi_30_64, -cospi_2_64,                  \
        cospi_10_64, cospi_22_64, -cospi_10_64, cospi_18_64, cospi_14_64 };  \
    v8i16 mask2_m = { cospi_14_64, -cospi_18_64, cospi_26_64,                \
        cospi_6_64, -cospi_26_64, cospi_8_64, cospi_24_64, -cospi_8_64 };    \
    v8i16 mask3_m = { -cospi_24_64, cospi_8_64, cospi_16_64,                 \
        -cospi_16_64, 0, 0, 0, 0 };                                          \
                                                                             \
    k0_m = VP9_SET_CONST_PAIR(mask1_m, 0, 1);                                \
    k1_m = VP9_SET_CONST_PAIR(mask1_m, 1, 2);                                \
    ILVRL_H2_SH(in1, in0, in_s1, in_s0);                                     \
    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,          \
                r0_m, r1_m, r2_m, r3_m);                                     \
    k0_m = VP9_SET_CONST_PAIR(mask1_m, 6, 7);                                \
    k1_m = VP9_SET_CONST_PAIR(mask2_m, 0, 1);                                \
    ILVRL_H2_SH(in5, in4, in_s1, in_s0);                                     \
    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,          \
                r4_m, r5_m, r6_m, r7_m);                                     \
    ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m,                     \
         m0_m, m1_m, m2_m, m3_m);                                            \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS);                 \
    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, res0_m, res1_m);                     \
    SUB4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m,                     \
         m0_m, m1_m, m2_m, m3_m);                                            \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS);                 \
    PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, t0_m, t1_m);                         \
    k0_m = VP9_SET_CONST_PAIR(mask1_m, 3, 4);                                \
    k1_m = VP9_SET_CONST_PAIR(mask1_m, 4, 5);                                \
    ILVRL_H2_SH(in3, in2, in_s1, in_s0);                                     \
    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,          \
                r0_m, r1_m, r2_m, r3_m);                                     \
    k0_m = VP9_SET_CONST_PAIR(mask2_m, 2, 3);                                \
    k1_m = VP9_SET_CONST_PAIR(mask2_m, 3, 4);                                \
    ILVRL_H2_SH(in7, in6, in_s1, in_s0);                                     \
    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,          \
                r4_m, r5_m, r6_m, r7_m);                                     \
    ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m,                     \
         m0_m, m1_m, m2_m, m3_m);                                            \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS);                 \
    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, res2_m, res3_m);                     \
    SUB4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m,                     \
         m0_m, m1_m, m2_m, m3_m);                                            \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS);                 \
    PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, r2_m, r3_m);                         \
    ILVRL_H2_SW(r3_m, r2_m, m2_m, m3_m);                                     \
    BUTTERFLY_4(res0_m, res1_m, res3_m, res2_m, out0, in7, in4, in3);        \
    k0_m = VP9_SET_CONST_PAIR(mask2_m, 5, 6);                                \
    k1_m = VP9_SET_CONST_PAIR(mask2_m, 6, 7);                                \
    ILVRL_H2_SH(t1_m, t0_m, in_s1, in_s0);                                   \
    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,          \
                r0_m, r1_m, r2_m, r3_m);                                     \
    k1_m = VP9_SET_CONST_PAIR(mask3_m, 0, 1);                                \
    DOTP_SH4_SW(m2_m, m3_m, m2_m, m3_m, k0_m, k0_m, k1_m, k1_m,              \
                r4_m, r5_m, r6_m, r7_m);                                     \
    ADD4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m,                     \
         m0_m, m1_m, m2_m, m3_m);                                            \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS);                 \
    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in1, out6);                          \
    SUB4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m,                     \
         m0_m, m1_m, m2_m, m3_m);                                            \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS);                 \
    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in2, in5);                           \
    k0_m = VP9_SET_CONST_PAIR(mask3_m, 2, 2);                                \
    k1_m = VP9_SET_CONST_PAIR(mask3_m, 2, 3);                                \
    ILVRL_H2_SH(in4, in3, in_s1, in_s0);                                     \
    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,          \
                m0_m, m1_m, m2_m, m3_m);                                     \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS);                 \
    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in3, out4);                          \
    ILVRL_H2_SW(in5, in2, m2_m, m3_m);                                       \
    DOTP_SH4_SW(m2_m, m3_m, m2_m, m3_m, k0_m, k0_m, k1_m, k1_m,              \
                m0_m, m1_m, m2_m, m3_m);                                     \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS);                 \
    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, out2, in5);                          \
                                                                             \
    out1 = -in1;                                                             \
    out3 = -in3;                                                             \
    out5 = -in5;                                                             \
    out7 = -in7;                                                             \
}

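/* DC-only 8x8 path: input[0] is scaled twice by cospi_16_64, rounded
 * (add 2^4, divide by 2^5), and the constant is added to the 8x8 block. */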
static void vp9_idct8x8_1_add_msa(int16_t *input, uint8_t *dst,
                                  int32_t dst_stride)
{
    int16_t out;
    int32_t val;
    v8i16 vec;

    out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS);
    out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS);
    val = ROUND_POWER_OF_TWO(out, 5);
    vec = __msa_fill_h(val);
    input[0] = 0;

    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
    dst += (4 * dst_stride);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
}

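/* 8x8 IDCT for blocks with a small eob (nonzero coefficients confined to
 * the top-left 4x4 region): the first pass works on packed half-rows,
 * then a full 1D pass runs on the transposed result. */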
static void vp9_idct8x8_12_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                             int32_t dst_stride)
{
    v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
    v8i16 s0, s1, s2, s3, s4, s5, s6, s7, k0, k1, k2, k3, m0, m1, m2, m3;
    v4i32 tmp0, tmp1, tmp2, tmp3;
    v8i16 zero = { 0 };

    /* load vector elements of 8x8 block */
    LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8);
    ILVR_D2_SH(in1, in0, in3, in2, in0, in1);
    ILVR_D2_SH(in5, in4, in7, in6, in2, in3);

    /* stage1 */
    ILVL_H2_SH(in3, in0, in2, in1, s0, s1);
    k0 = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
    k2 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
    k3 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
    DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
    SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, VP9_DCT_CONST_BITS);
    PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
    PCKEV_H2_SH(zero, tmp2, zero, tmp3, s2, s3);
    BUTTERFLY_4(s0, s1, s3, s2, s4, s7, s6, s5);

    /* stage2 */
    ILVR_H2_SH(in3, in1, in2, in0, s1, s0);
    k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
    k3 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
    DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
    SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, VP9_DCT_CONST_BITS);
    PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
    PCKEV_H2_SH(zero, tmp2, zero, tmp3, s2, s3);
    BUTTERFLY_4(s0, s1, s2, s3, m0, m1, m2, m3);

    /* stage3 */
    s0 = __msa_ilvr_h(s6, s5);

    k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
    DOTP_SH2_SW(s0, s0, k1, k0, tmp0, tmp1);
    SRARI_W2_SW(tmp0, tmp1, VP9_DCT_CONST_BITS);
    PCKEV_H2_SH(zero, tmp0, zero, tmp1, s2, s3);

    /* stage4 */
    BUTTERFLY_8(m0, m1, m2, m3, s4, s2, s3, s7,
                in0, in1, in2, in3, in4, in5, in6, in7);
    TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                       in0, in1, in2, in3, in4, in5, in6, in7);
    VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                   in0, in1, in2, in3, in4, in5, in6, in7);

    /* final rounding (add 2^4, divide by 2^5) and shift */
    SRARI_H4_SH(in0, in1, in2, in3, 5);
    SRARI_H4_SH(in4, in5, in6, in7, 5);

    /* add block and store 8x8 */
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
    dst += (4 * dst_stride);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}

static void vp9_idct8x8_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                          int32_t dst_stride)
{
    v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
    v8i16 zero = { 0 };

    /* load vector elements of 8x8 block */
    LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8);
    /* 1D idct8x8 */
    VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                   in0, in1, in2, in3, in4, in5, in6, in7);
    /* columns transform */
    TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                       in0, in1, in2, in3, in4, in5, in6, in7);
    /* 1D idct8x8 */
    VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                   in0, in1, in2, in3, in4, in5, in6, in7);
    /* final rounding (add 2^4, divide by 2^5) and shift */
    SRARI_H4_SH(in0, in1, in2, in3, 5);
    SRARI_H4_SH(in4, in5, in6, in7, 5);
    /* add block and store 8x8 */
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
    dst += (4 * dst_stride);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}

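/* Full 8x8 IADST: after the row pass and transpose, the column pass is
 * interleaved with the add-to-dst stores, one output pair (k, 7 - k)
 * at a time. */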
static void vp9_iadst8x8_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                           int32_t dst_stride)
{
    v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
    v8i16 res0, res1, res2, res3, res4, res5, res6, res7;
    v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 out0, out1, out2, out3, out4, out5, out6, out7;
    v8i16 cnst0, cnst1, cnst2, cnst3, cnst4;
    v8i16 temp0, temp1, temp2, temp3, s0, s1;
    v8i16 zero = { 0 };

    /* load vector elements of 8x8 block */
    LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8);

    /* 1D adst8x8 */
    VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
              in0, in1, in2, in3, in4, in5, in6, in7);

    /* columns transform */
    TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                       in0, in1, in2, in3, in4, in5, in6, in7);

    cnst0 = __msa_fill_h(cospi_2_64);
    cnst1 = __msa_fill_h(cospi_30_64);
    cnst2 = -cnst0;
    ILVEV_H2_SH(cnst0, cnst1, cnst1, cnst2, cnst0, cnst1);
    cnst2 = __msa_fill_h(cospi_18_64);
    cnst3 = __msa_fill_h(cospi_14_64);
    cnst4 = -cnst2;
    ILVEV_H2_SH(cnst2, cnst3, cnst3, cnst4, cnst2, cnst3);

    ILVRL_H2_SH(in0, in7, temp1, temp0);
    ILVRL_H2_SH(in4, in3, temp3, temp2);
    VP9_DOT_ADD_SUB_SRARI_PCK(temp0, temp1, temp2, temp3, cnst0, cnst1, cnst2,
                              cnst3, in7, in0, in4, in3);

    cnst0 = __msa_fill_h(cospi_10_64);
    cnst1 = __msa_fill_h(cospi_22_64);
    cnst2 = -cnst0;
    ILVEV_H2_SH(cnst0, cnst1, cnst1, cnst2, cnst0, cnst1);
    cnst2 = __msa_fill_h(cospi_26_64);
    cnst3 = __msa_fill_h(cospi_6_64);
    cnst4 = -cnst2;
    ILVEV_H2_SH(cnst2, cnst3, cnst3, cnst4, cnst2, cnst3);

    ILVRL_H2_SH(in2, in5, temp1, temp0);
    ILVRL_H2_SH(in6, in1, temp3, temp2);
    VP9_DOT_ADD_SUB_SRARI_PCK(temp0, temp1, temp2, temp3, cnst0, cnst1, cnst2,
                              cnst3, in5, in2, in6, in1);
    BUTTERFLY_4(in7, in0, in2, in5, s1, s0, in2, in5);
    out7 = -s0;
    out0 = s1;
    SRARI_H2_SH(out0, out7, 5);
    dst0 = LD_UB(dst + 0 * dst_stride);
    dst7 = LD_UB(dst + 7 * dst_stride);

    res0 = (v8i16) __msa_ilvr_b((v16i8) zero, (v16i8) dst0);
    res0 += out0;
    CLIP_SH_0_255(res0);
    res0 = (v8i16) __msa_pckev_b((v16i8) res0, (v16i8) res0);
    ST_D1(res0, 0, dst);

    res7 = (v8i16) __msa_ilvr_b((v16i8) zero, (v16i8) dst7);
    res7 += out7;
    CLIP_SH_0_255(res7);
    res7 = (v8i16) __msa_pckev_b((v16i8) res7, (v16i8) res7);
    ST_D1(res7, 0, dst + 7 * dst_stride);

    cnst1 = __msa_fill_h(cospi_24_64);
    cnst0 = __msa_fill_h(cospi_8_64);
    cnst3 = -cnst1;
    cnst2 = -cnst0;

    ILVEV_H2_SH(cnst3, cnst0, cnst1, cnst2, cnst3, cnst2);
    cnst0 = __msa_ilvev_h(cnst1, cnst0);
    cnst1 = cnst0;

    ILVRL_H2_SH(in4, in3, temp1, temp0);
    ILVRL_H2_SH(in6, in1, temp3, temp2);
    VP9_DOT_ADD_SUB_SRARI_PCK(temp0, temp1, temp2, temp3, cnst0, cnst2, cnst3,
                              cnst1, out1, out6, s0, s1);
    out1 = -out1;
    SRARI_H2_SH(out1, out6, 5);
    dst1 = LD_UB(dst + 1 * dst_stride);
    dst6 = LD_UB(dst + 6 * dst_stride);
    ILVR_B2_SH(zero, dst1, zero, dst6, res1, res6);
    ADD2(res1, out1, res6, out6, res1, res6);
    CLIP_SH2_0_255(res1, res6);
    PCKEV_B2_SH(res1, res1, res6, res6, res1, res6);
    ST_D1(res1, 0, dst + dst_stride);
    ST_D1(res6, 0, dst + 6 * dst_stride);

    cnst0 = __msa_fill_h(cospi_16_64);
    cnst1 = -cnst0;
    cnst1 = __msa_ilvev_h(cnst1, cnst0);

    ILVRL_H2_SH(in2, in5, temp1, temp0);
    ILVRL_H2_SH(s0, s1, temp3, temp2);
    out3 = VP9_DOT_SHIFT_RIGHT_PCK_H(temp0, temp1, cnst0);
    out4 = VP9_DOT_SHIFT_RIGHT_PCK_H(temp0, temp1, cnst1);
    out3 = -out3;
    SRARI_H2_SH(out3, out4, 5);
    dst3 = LD_UB(dst + 3 * dst_stride);
    dst4 = LD_UB(dst + 4 * dst_stride);
    ILVR_B2_SH(zero, dst3, zero, dst4, res3, res4);
    ADD2(res3, out3, res4, out4, res3, res4);
    CLIP_SH2_0_255(res3, res4);
    PCKEV_B2_SH(res3, res3, res4, res4, res3, res4);
    ST_D1(res3, 0, dst + 3 * dst_stride);
    ST_D1(res4, 0, dst + 4 * dst_stride);

    out2 = VP9_DOT_SHIFT_RIGHT_PCK_H(temp2, temp3, cnst0);
    out5 = VP9_DOT_SHIFT_RIGHT_PCK_H(temp2, temp3, cnst1);
    out5 = -out5;
    SRARI_H2_SH(out2, out5, 5);
    dst2 = LD_UB(dst + 2 * dst_stride);
    dst5 = LD_UB(dst + 5 * dst_stride);
    ILVR_B2_SH(zero, dst2, zero, dst5, res2, res5);
    ADD2(res2, out2, res5, out5, res2, res5);
    CLIP_SH2_0_255(res2, res5);
    PCKEV_B2_SH(res2, res2, res5, res5, res2, res5);
    ST_D1(res2, 0, dst + 2 * dst_stride);
    ST_D1(res5, 0, dst + 5 * dst_stride);
}

static void vp9_iadst_idct_8x8_add_msa(int16_t *input, uint8_t *dst,
                                       int32_t dst_stride, int32_t eob)
{
    v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
    v8i16 zero = { 0 };

    /* load vector elements of 8x8 block in ADST input order */
    LD_SH8(input, 8, in1, in6, in3, in4, in5, in2, in7, in0);
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8);
    /* 1D adst8x8 */
    VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                    in0, in1, in2, in3, in4, in5, in6, in7);
    /* columns transform */
    TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                       in0, in1, in2, in3, in4, in5, in6, in7);
    /* 1D idct8x8 */
    VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                   in0, in1, in2, in3, in4, in5, in6, in7);
    /* final rounding (add 2^4, divide by 2^5) and shift */
    SRARI_H4_SH(in0, in1, in2, in3, 5);
    SRARI_H4_SH(in4, in5, in6, in7, 5);
    /* add block and store 8x8 */
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
    dst += (4 * dst_stride);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}

static void vp9_idct_iadst_8x8_add_msa(int16_t *input, uint8_t *dst,
                                       int32_t dst_stride, int32_t eob)
{
    v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
    v8i16 zero = { 0 };

    /* load vector elements of 8x8 block */
    LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8);

    /* 1D idct8x8 */
    VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                   in0, in1, in2, in3, in4, in5, in6, in7);
    /* columns transform; outputs permuted into ADST input order */
    TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                       in1, in6, in3, in4, in5, in2, in7, in0);
    /* 1D adst8x8 */
    VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                    in0, in1, in2, in3, in4, in5, in6, in7);
    /* final rounding (add 2^4, divide by 2^5) and shift */
    SRARI_H4_SH(in0, in1, in2, in3, 5);
    SRARI_H4_SH(in4, in5, in6, in7, 5);
    /* add block and store 8x8 */
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
    dst += (4 * dst_stride);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}

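/* 16-point inverse ADST (stages 1 through 4), eight columns at a time. */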
889 #define VP9_IADST8x16_1D(r0, r1, r2, r3, r4, r5, r6, r7, r8,          \
890                          r9, r10, r11, r12, r13, r14, r15,            \
891                          out0, out1, out2, out3, out4, out5,          \
892                          out6, out7, out8, out9, out10, out11,        \
893                          out12, out13, out14, out15)                  \
894 {                                                                     \
895     v8i16 g0_m, g1_m, g2_m, g3_m, g4_m, g5_m, g6_m, g7_m;             \
896     v8i16 g8_m, g9_m, g10_m, g11_m, g12_m, g13_m, g14_m, g15_m;       \
897     v8i16 h0_m, h1_m, h2_m, h3_m, h4_m, h5_m, h6_m, h7_m;             \
898     v8i16 h8_m, h9_m, h10_m, h11_m;                                   \
899     v8i16 k0_m, k1_m, k2_m, k3_m;                                     \
900                                                                       \
901     /* stage 1 */                                                     \
902     k0_m = VP9_SET_COSPI_PAIR(cospi_1_64, cospi_31_64);               \
903     k1_m = VP9_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);              \
904     k2_m = VP9_SET_COSPI_PAIR(cospi_17_64, cospi_15_64);              \
905     k3_m = VP9_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);             \
906     VP9_MADD_BF(r15, r0, r7, r8, k0_m, k1_m, k2_m, k3_m,              \
907                 g0_m, g1_m, g2_m, g3_m);                              \
908     k0_m = VP9_SET_COSPI_PAIR(cospi_5_64, cospi_27_64);               \
909     k1_m = VP9_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);              \
910     k2_m = VP9_SET_COSPI_PAIR(cospi_21_64, cospi_11_64);              \
911     k3_m = VP9_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);             \
912     VP9_MADD_BF(r13, r2, r5, r10, k0_m, k1_m, k2_m, k3_m,             \
                g4_m, g5_m, g6_m, g7_m);                              \
    k0_m = VP9_SET_COSPI_PAIR(cospi_9_64, cospi_23_64);               \
    k1_m = VP9_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);              \
    k2_m = VP9_SET_COSPI_PAIR(cospi_25_64, cospi_7_64);               \
    k3_m = VP9_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);              \
    VP9_MADD_BF(r11, r4, r3, r12, k0_m, k1_m, k2_m, k3_m,             \
                g8_m, g9_m, g10_m, g11_m);                            \
    k0_m = VP9_SET_COSPI_PAIR(cospi_13_64, cospi_19_64);              \
    k1_m = VP9_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);             \
    k2_m = VP9_SET_COSPI_PAIR(cospi_29_64, cospi_3_64);               \
    k3_m = VP9_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);              \
    VP9_MADD_BF(r9, r6, r1, r14, k0_m, k1_m, k2_m, k3_m,              \
                g12_m, g13_m, g14_m, g15_m);                          \
                                                                      \
    /* stage 2 */                                                     \
    k0_m = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);               \
    k1_m = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);              \
    k2_m = VP9_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);              \
    VP9_MADD_BF(g1_m, g3_m, g9_m, g11_m, k0_m, k1_m, k2_m, k0_m,      \
                h0_m, h1_m, h2_m, h3_m);                              \
    k0_m = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);              \
    k1_m = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);             \
    k2_m = VP9_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);             \
    VP9_MADD_BF(g7_m, g5_m, g15_m, g13_m, k0_m, k1_m, k2_m, k0_m,     \
                h4_m, h5_m, h6_m, h7_m);                              \
    BUTTERFLY_4(h0_m, h2_m, h6_m, h4_m, out8, out9, out11, out10);    \
    BUTTERFLY_8(g0_m, g2_m, g4_m, g6_m, g14_m, g12_m, g10_m, g8_m,    \
                h8_m, h9_m, h10_m, h11_m, h6_m, h4_m, h2_m, h0_m);    \
                                                                      \
    /* stage 3 */                                                     \
    BUTTERFLY_4(h8_m, h9_m, h11_m, h10_m, out0, out1, h11_m, h10_m);  \
    k0_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);               \
    k1_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);              \
    k2_m = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);              \
    VP9_MADD_BF(h0_m, h2_m, h4_m, h6_m, k0_m, k1_m, k2_m, k0_m,       \
                out4, out6, out5, out7);                              \
    VP9_MADD_BF(h1_m, h3_m, h5_m, h7_m, k0_m, k1_m, k2_m, k0_m,       \
                out12, out14, out13, out15);                          \
                                                                      \
    /* stage 4 */                                                     \
    k0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);              \
    k1_m = VP9_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);            \
    k2_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);             \
    k3_m = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);             \
    VP9_MADD_SHORT(h10_m, h11_m, k1_m, k2_m, out2, out3);             \
    VP9_MADD_SHORT(out6, out7, k0_m, k3_m, out6, out7);               \
    VP9_MADD_SHORT(out10, out11, k0_m, k3_m, out10, out11);           \
    VP9_MADD_SHORT(out14, out15, k1_m, k2_m, out14, out15);           \
}

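/* Column pass of the 16x16 IDCT: a 16-point 1-D IDCT over eight columns.
 * The results are rounded with a shift of 6 (the combined scaling of both
 * passes) and added with clipping to the destination pixels. The coefficient
 * rows are zeroed right after loading, leaving the block buffer clean. */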
static void vp9_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
                                             int32_t dst_stride)
{
    v8i16 loc0, loc1, loc2, loc3;
    v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;
    v8i16 reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15;
    v8i16 tmp5, tmp6, tmp7;
    v8i16 zero = { 0 };

    /* load up 8x16 */
    LD_SH16(input, 16,
            reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7,
            reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15);

    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);
    input += 8 * 16;
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);

    VP9_DOTP_CONST_PAIR(reg2, reg14, cospi_28_64, cospi_4_64, reg2, reg14);
    VP9_DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6);
    BUTTERFLY_4(reg2, reg14, reg6, reg10, loc0, loc1, reg14, reg2);
    VP9_DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3);
    VP9_DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8);
    VP9_DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12);
    BUTTERFLY_4(reg8, reg0, reg4, reg12, reg2, reg6, reg10, reg14);

    reg0 = reg2 - loc1;
    reg2 = reg2 + loc1;
    reg12 = reg14 - loc0;
    reg14 = reg14 + loc0;
    reg4 = reg6 - loc3;
    reg6 = reg6 + loc3;
    reg8 = reg10 - loc2;
    reg10 = reg10 + loc2;

    /* stage 2 */
    VP9_DOTP_CONST_PAIR(reg1, reg15, cospi_30_64, cospi_2_64, reg1, reg15);
    VP9_DOTP_CONST_PAIR(reg9, reg7, cospi_14_64, cospi_18_64, loc2, loc3);

    reg9 = reg1 - loc2;
    reg1 = reg1 + loc2;
    reg7 = reg15 - loc3;
    reg15 = reg15 + loc3;

    VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_22_64, cospi_10_64, reg5, reg11);
    VP9_DOTP_CONST_PAIR(reg13, reg3, cospi_6_64, cospi_26_64, loc0, loc1);
    BUTTERFLY_4(loc0, loc1, reg11, reg5, reg13, reg3, reg11, reg5);

    loc1 = reg15 + reg3;
    reg3 = reg15 - reg3;
    loc2 = reg2 + loc1;
    reg15 = reg2 - loc1;

    loc1 = reg1 + reg13;
    reg13 = reg1 - reg13;
    loc0 = reg0 + loc1;
    loc1 = reg0 - loc1;
    tmp6 = loc0;
    tmp7 = loc1;
    reg0 = loc2;

    VP9_DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9);
    VP9_DOTP_CONST_PAIR((-reg5), (-reg11), cospi_8_64, cospi_24_64, reg5,
                        reg11);

    loc0 = reg9 + reg5;
    reg5 = reg9 - reg5;
    reg2 = reg6 + loc0;
    reg1 = reg6 - loc0;

    loc0 = reg7 + reg11;
    reg11 = reg7 - reg11;
    loc1 = reg4 + loc0;
    loc2 = reg4 - loc0;
    tmp5 = loc1;

    VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11);
    BUTTERFLY_4(reg8, reg10, reg11, reg5, loc0, reg4, reg9, loc1);

    reg10 = loc0;
    reg11 = loc1;

    VP9_DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13);
    BUTTERFLY_4(reg12, reg14, reg13, reg3, reg8, reg6, reg7, reg5);
    reg13 = loc2;

    /* restore the deferred results, then round, add to dst and store */
    reg12 = tmp5;
    reg14 = tmp6;
    reg3 = tmp7;

    SRARI_H4_SH(reg0, reg2, reg4, reg6, 6);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg0, reg2, reg4, reg6);
    dst += (4 * dst_stride);
    SRARI_H4_SH(reg8, reg10, reg12, reg14, 6);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg8, reg10, reg12, reg14);
    dst += (4 * dst_stride);
    SRARI_H4_SH(reg3, reg13, reg11, reg5, 6);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg3, reg13, reg11, reg5);
    dst += (4 * dst_stride);
    SRARI_H4_SH(reg7, reg9, reg1, reg15, 6);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg7, reg9, reg1, reg15);
}

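/* Row pass of the 16x16 IDCT: the same 16-point 1-D transform over eight
 * columns, but the intermediate results are transposed as two 8x8 blocks
 * and written to the output buffer for the subsequent column pass. */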
static void vp9_idct16_1d_columns_msa(int16_t *input, int16_t *output)
{
    v8i16 loc0, loc1, loc2, loc3;
    v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;
    v8i16 reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15;
    v8i16 tmp5, tmp6, tmp7;
    v8i16 zero = { 0 };

    /* load up 8x16 */
    LD_SH16(input, 16,
            reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7,
            reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15);

    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);
    input += 16 * 8;
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);

    VP9_DOTP_CONST_PAIR(reg2, reg14, cospi_28_64, cospi_4_64, reg2, reg14);
    VP9_DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6);
    BUTTERFLY_4(reg2, reg14, reg6, reg10, loc0, loc1, reg14, reg2);
    VP9_DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3);
    VP9_DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8);
    VP9_DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12);
    BUTTERFLY_4(reg8, reg0, reg4, reg12, reg2, reg6, reg10, reg14);

    reg0 = reg2 - loc1;
    reg2 = reg2 + loc1;
    reg12 = reg14 - loc0;
    reg14 = reg14 + loc0;
    reg4 = reg6 - loc3;
    reg6 = reg6 + loc3;
    reg8 = reg10 - loc2;
    reg10 = reg10 + loc2;

    /* stage 2 */
    VP9_DOTP_CONST_PAIR(reg1, reg15, cospi_30_64, cospi_2_64, reg1, reg15);
    VP9_DOTP_CONST_PAIR(reg9, reg7, cospi_14_64, cospi_18_64, loc2, loc3);

    reg9 = reg1 - loc2;
    reg1 = reg1 + loc2;
    reg7 = reg15 - loc3;
    reg15 = reg15 + loc3;

    VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_22_64, cospi_10_64, reg5, reg11);
    VP9_DOTP_CONST_PAIR(reg13, reg3, cospi_6_64, cospi_26_64, loc0, loc1);
    BUTTERFLY_4(loc0, loc1, reg11, reg5, reg13, reg3, reg11, reg5);

    loc1 = reg15 + reg3;
    reg3 = reg15 - reg3;
    loc2 = reg2 + loc1;
    reg15 = reg2 - loc1;

    loc1 = reg1 + reg13;
    reg13 = reg1 - reg13;
    loc0 = reg0 + loc1;
    loc1 = reg0 - loc1;
    tmp6 = loc0;
    tmp7 = loc1;
    reg0 = loc2;

    VP9_DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9);
    VP9_DOTP_CONST_PAIR((-reg5), (-reg11), cospi_8_64, cospi_24_64, reg5,
                        reg11);

    loc0 = reg9 + reg5;
    reg5 = reg9 - reg5;
    reg2 = reg6 + loc0;
    reg1 = reg6 - loc0;

    loc0 = reg7 + reg11;
    reg11 = reg7 - reg11;
    loc1 = reg4 + loc0;
    loc2 = reg4 - loc0;

    tmp5 = loc1;

    VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11);
    BUTTERFLY_4(reg8, reg10, reg11, reg5, loc0, reg4, reg9, loc1);

    reg10 = loc0;
    reg11 = loc1;

    VP9_DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13);
    BUTTERFLY_4(reg12, reg14, reg13, reg3, reg8, reg6, reg7, reg5);
    reg13 = loc2;

    /* Transpose and store the output */
    reg12 = tmp5;
    reg14 = tmp6;
    reg3 = tmp7;

    /* transpose block */
    TRANSPOSE8x8_SH_SH(reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14,
                       reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14);
    ST_SH4(reg0, reg2, reg4, reg6, output, 16);
    ST_SH4(reg8, reg10, reg12, reg14, (output + 4 * 16), 16);

    /* transpose block */
    TRANSPOSE8x8_SH_SH(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15,
                       reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15);
    ST_SH4(reg3, reg13, reg11, reg5, (output + 8), 16);
    ST_SH4(reg7, reg9, reg1, reg15, (output + 8 + 4 * 16), 16);
}

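/* DC-only 16x16 path: with a single nonzero coefficient both 1-D passes
 * collapse to a multiplication by cospi_16_64 (each rounded at
 * VP9_DCT_CONST_BITS), followed by the final rounding shift of 6; the
 * resulting offset is broadcast and added with clipping to all 16x16
 * destination pixels. */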
static void vp9_idct16x16_1_add_msa(int16_t *input, uint8_t *dst,
                                    int32_t dst_stride)
{
    uint8_t i;
    int16_t out;
    v8i16 vec, res0, res1, res2, res3, res4, res5, res6, res7;
    v16u8 dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3;

    out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS);
    out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS);
    out = ROUND_POWER_OF_TWO(out, 6);
    input[0] = 0;

    vec = __msa_fill_h(out);

    for (i = 4; i--;) {
        LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
        UNPCK_UB_SH(dst0, res0, res4);
        UNPCK_UB_SH(dst1, res1, res5);
        UNPCK_UB_SH(dst2, res2, res6);
        UNPCK_UB_SH(dst3, res3, res7);
        ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2,
             res3);
        ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6,
             res7);
        CLIP_SH8_0_255(res0, res1, res2, res3, res4, res5, res6, res7);
        PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3,
                    tmp0, tmp1, tmp2, tmp3);
        ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride);
        dst += (4 * dst_stride);
    }
}

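/* Partial 16x16 path for eob <= 10: the nonzero coefficients all lie in the
 * top-left 4x4 corner, so a single 8x16 row pass suffices; only its top four
 * output rows are valid, the remaining twelve are cleared with inline word
 * stores before the two column passes run. */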
static void vp9_idct16x16_10_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                               int32_t dst_stride)
{
    int32_t i;
    int16_t out_arr[16 * 16] ALLOC_ALIGNED(ALIGNMENT);
    int16_t *out = out_arr;

    /* transform rows */
    vp9_idct16_1d_columns_msa(input, out);

    /* short case just considers top 4 rows as valid output */
    out += 4 * 16;
    for (i = 12; i--;) {
        __asm__ volatile (
            "sw     $zero,   0(%[out])     \n\t"
            "sw     $zero,   4(%[out])     \n\t"
            "sw     $zero,   8(%[out])     \n\t"
            "sw     $zero,  12(%[out])     \n\t"
            "sw     $zero,  16(%[out])     \n\t"
            "sw     $zero,  20(%[out])     \n\t"
            "sw     $zero,  24(%[out])     \n\t"
            "sw     $zero,  28(%[out])     \n\t"

            :
            : [out] "r" (out)
        );

        out += 16;
    }

    out = out_arr;

    /* transform columns */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
                                         dst_stride);
    }
}

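/* Full 16x16 IDCT: two 8x16 row passes into an aligned intermediate buffer,
 * then two 8x16 column passes that add the result into the destination. */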
static void vp9_idct16x16_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                            int32_t dst_stride)
{
    int32_t i;
    int16_t out_arr[16 * 16] ALLOC_ALIGNED(ALIGNMENT);
    int16_t *out = out_arr;

    /* transform rows */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_idct16_1d_columns_msa((input + (i << 3)), (out + (i << 7)));
    }

    /* transform columns */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
                                         dst_stride);
    }
}

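/* Row pass of the 16x16 IADST: the VP9_IADST8x16_1D outputs come out
 * permuted and partly sign-flipped, so selected vectors are negated before
 * the two 8x8 transposes that write the intermediate buffer. */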
static void vp9_iadst16_1d_columns_msa(int16_t *input, int16_t *output)
{
    v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
    v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15;
    v8i16 zero = { 0 };

    /* load input data */
    LD_SH16(input, 16,
            l0, l1, l2, l3, l4, l5, l6, l7,
            l8, l9, l10, l11, l12, l13, l14, l15);

    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);
    input += 16 * 8;
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);

    /* ADST in horizontal */
    VP9_IADST8x16_1D(l0, l1, l2, l3, l4, l5, l6, l7,
                     l8, l9, l10, l11, l12, l13, l14, l15,
                     r0, r1, r2, r3, r4, r5, r6, r7,
                     r8, r9, r10, r11, r12, r13, r14, r15);

    l1 = -r8;
    l3 = -r4;
    l13 = -r13;
    l15 = -r1;

    TRANSPOSE8x8_SH_SH(r0, l1, r12, l3, r6, r14, r10, r2,
                       l0, l1, l2, l3, l4, l5, l6, l7);
    ST_SH8(l0, l1, l2, l3, l4, l5, l6, l7, output, 16);
    TRANSPOSE8x8_SH_SH(r3, r11, r15, r7, r5, l13, r9, l15,
                       l8, l9, l10, l11, l12, l13, l14, l15);
    ST_SH8(l8, l9, l10, l11, l12, l13, l14, l15, (output + 8), 16);
}

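/* Column pass of the 16x16 IADST, fused with reconstruction: outputs are
 * produced in symmetric row pairs (0/15, 1/14, ...), each rounded with a
 * shift of 6, added with clipping to the matching dst rows and stored as
 * 8-byte writes. */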
static void vp9_iadst16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
                                              int32_t dst_stride)
{
    v8i16 v0, v2, v4, v6, k0, k1, k2, k3;
    v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
    v8i16 out0, out1, out2, out3, out4, out5, out6, out7;
    v8i16 out8, out9, out10, out11, out12, out13, out14, out15;
    v8i16 g0, g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, g11, g12, g13, g14, g15;
    v8i16 h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11;
    v8i16 res0, res1, res2, res3, res4, res5, res6, res7;
    v8i16 res8, res9, res10, res11, res12, res13, res14, res15;
    v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v16u8 dst8, dst9, dst10, dst11, dst12, dst13, dst14, dst15;
    v16i8 zero = { 0 };

    r0 = LD_SH(input + 0 * 16);
    r3 = LD_SH(input + 3 * 16);
    r4 = LD_SH(input + 4 * 16);
    r7 = LD_SH(input + 7 * 16);
    r8 = LD_SH(input + 8 * 16);
    r11 = LD_SH(input + 11 * 16);
    r12 = LD_SH(input + 12 * 16);
    r15 = LD_SH(input + 15 * 16);

    /* stage 1 */
    k0 = VP9_SET_COSPI_PAIR(cospi_1_64, cospi_31_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_17_64, cospi_15_64);
    k3 = VP9_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);
    VP9_MADD_BF(r15, r0, r7, r8, k0, k1, k2, k3, g0, g1, g2, g3);
    k0 = VP9_SET_COSPI_PAIR(cospi_9_64, cospi_23_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_25_64, cospi_7_64);
    k3 = VP9_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);
    VP9_MADD_BF(r11, r4, r3, r12, k0, k1, k2, k3, g8, g9, g10, g11);
    BUTTERFLY_4(g0, g2, g10, g8, h8, h9, v2, v0);
    k0 = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
    k2 = VP9_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);
    VP9_MADD_BF(g1, g3, g9, g11, k0, k1, k2, k0, h0, h1, h2, h3);

    r1 = LD_SH(input + 1 * 16);
    r2 = LD_SH(input + 2 * 16);
    r5 = LD_SH(input + 5 * 16);
    r6 = LD_SH(input + 6 * 16);
    r9 = LD_SH(input + 9 * 16);
    r10 = LD_SH(input + 10 * 16);
    r13 = LD_SH(input + 13 * 16);
    r14 = LD_SH(input + 14 * 16);

    k0 = VP9_SET_COSPI_PAIR(cospi_5_64, cospi_27_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_21_64, cospi_11_64);
    k3 = VP9_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);
    VP9_MADD_BF(r13, r2, r5, r10, k0, k1, k2, k3, g4, g5, g6, g7);
    k0 = VP9_SET_COSPI_PAIR(cospi_13_64, cospi_19_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_29_64, cospi_3_64);
    k3 = VP9_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);
    VP9_MADD_BF(r9, r6, r1, r14, k0, k1, k2, k3, g12, g13, g14, g15);
    BUTTERFLY_4(g4, g6, g14, g12, h10, h11, v6, v4);
    BUTTERFLY_4(h8, h9, h11, h10, out0, out1, h11, h10);
    out1 = -out1;
    SRARI_H2_SH(out0, out1, 6);
    dst0 = LD_UB(dst + 0 * dst_stride);
    dst1 = LD_UB(dst + 15 * dst_stride);
    ILVR_B2_SH(zero, dst0, zero, dst1, res0, res1);
    ADD2(res0, out0, res1, out1, res0, res1);
    CLIP_SH2_0_255(res0, res1);
    PCKEV_B2_SH(res0, res0, res1, res1, res0, res1);
    ST_D1(res0, 0, dst);
    ST_D1(res1, 0, dst + 15 * dst_stride);

    k0 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
    k1 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);
    VP9_MADD_BF(g7, g5, g15, g13, k0, k1, k2, k0, h4, h5, h6, h7);
    BUTTERFLY_4(h0, h2, h6, h4, out8, out9, out11, out10);
    out8 = -out8;

    SRARI_H2_SH(out8, out9, 6);
    dst8 = LD_UB(dst + 1 * dst_stride);
    dst9 = LD_UB(dst + 14 * dst_stride);
    ILVR_B2_SH(zero, dst8, zero, dst9, res8, res9);
    ADD2(res8, out8, res9, out9, res8, res9);
    CLIP_SH2_0_255(res8, res9);
    PCKEV_B2_SH(res8, res8, res9, res9, res8, res9);
    ST_D1(res8, 0, dst + dst_stride);
    ST_D1(res9, 0, dst + 14 * dst_stride);

    k0 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
    k2 = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);
    VP9_MADD_BF(v0, v2, v4, v6, k0, k1, k2, k0, out4, out6, out5, out7);
    out4 = -out4;
    SRARI_H2_SH(out4, out5, 6);
    dst4 = LD_UB(dst + 3 * dst_stride);
    dst5 = LD_UB(dst + 12 * dst_stride);
    ILVR_B2_SH(zero, dst4, zero, dst5, res4, res5);
    ADD2(res4, out4, res5, out5, res4, res5);
    CLIP_SH2_0_255(res4, res5);
    PCKEV_B2_SH(res4, res4, res5, res5, res4, res5);
    ST_D1(res4, 0, dst + 3 * dst_stride);
    ST_D1(res5, 0, dst + 12 * dst_stride);

    VP9_MADD_BF(h1, h3, h5, h7, k0, k1, k2, k0, out12, out14, out13, out15);
    out13 = -out13;
    SRARI_H2_SH(out12, out13, 6);
    dst12 = LD_UB(dst + 2 * dst_stride);
    dst13 = LD_UB(dst + 13 * dst_stride);
    ILVR_B2_SH(zero, dst12, zero, dst13, res12, res13);
    ADD2(res12, out12, res13, out13, res12, res13);
    CLIP_SH2_0_255(res12, res13);
    PCKEV_B2_SH(res12, res12, res13, res13, res12, res13);
    ST_D1(res12, 0, dst + 2 * dst_stride);
    ST_D1(res13, 0, dst + 13 * dst_stride);

    k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
    k3 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
    VP9_MADD_SHORT(out6, out7, k0, k3, out6, out7);
    SRARI_H2_SH(out6, out7, 6);
    dst6 = LD_UB(dst + 4 * dst_stride);
    dst7 = LD_UB(dst + 11 * dst_stride);
    ILVR_B2_SH(zero, dst6, zero, dst7, res6, res7);
    ADD2(res6, out6, res7, out7, res6, res7);
    CLIP_SH2_0_255(res6, res7);
    PCKEV_B2_SH(res6, res6, res7, res7, res6, res7);
    ST_D1(res6, 0, dst + 4 * dst_stride);
    ST_D1(res7, 0, dst + 11 * dst_stride);

    VP9_MADD_SHORT(out10, out11, k0, k3, out10, out11);
    SRARI_H2_SH(out10, out11, 6);
    dst10 = LD_UB(dst + 6 * dst_stride);
    dst11 = LD_UB(dst + 9 * dst_stride);
    ILVR_B2_SH(zero, dst10, zero, dst11, res10, res11);
    ADD2(res10, out10, res11, out11, res10, res11);
    CLIP_SH2_0_255(res10, res11);
    PCKEV_B2_SH(res10, res10, res11, res11, res10, res11);
    ST_D1(res10, 0, dst + 6 * dst_stride);
    ST_D1(res11, 0, dst + 9 * dst_stride);

    k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
    VP9_MADD_SHORT(h10, h11, k1, k2, out2, out3);
    SRARI_H2_SH(out2, out3, 6);
    dst2 = LD_UB(dst + 7 * dst_stride);
    dst3 = LD_UB(dst + 8 * dst_stride);
    ILVR_B2_SH(zero, dst2, zero, dst3, res2, res3);
    ADD2(res2, out2, res3, out3, res2, res3);
    CLIP_SH2_0_255(res2, res3);
    PCKEV_B2_SH(res2, res2, res3, res3, res2, res3);
    ST_D1(res2, 0, dst + 7 * dst_stride);
    ST_D1(res3, 0, dst + 8 * dst_stride);

    VP9_MADD_SHORT(out14, out15, k1, k2, out14, out15);
    SRARI_H2_SH(out14, out15, 6);
    dst14 = LD_UB(dst + 5 * dst_stride);
    dst15 = LD_UB(dst + 10 * dst_stride);
    ILVR_B2_SH(zero, dst14, zero, dst15, res14, res15);
    ADD2(res14, out14, res15, out15, res14, res15);
    CLIP_SH2_0_255(res14, res15);
    PCKEV_B2_SH(res14, res14, res15, res15, res14, res15);
    ST_D1(res14, 0, dst + 5 * dst_stride);
    ST_D1(res15, 0, dst + 10 * dst_stride);
}

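/* Full 16x16 IADST: two row passes into an intermediate buffer, then two
 * column passes that add directly into the destination. */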
static void vp9_iadst16x16_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                             int32_t dst_stride)
{
    int16_t out_arr[16 * 16] ALLOC_ALIGNED(ALIGNMENT);
    int16_t *out = out_arr;
    int32_t i;

    /* transform rows */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_iadst16_1d_columns_msa((input + (i << 3)), (out + (i << 7)));
    }

    /* transform columns */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_iadst16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
                                          dst_stride);
    }
}

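/* Hybrid 16x16 transforms: IADST on rows with IDCT on columns and vice
 * versa, built from the same 1-D helpers as the pure transforms. */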
static void vp9_iadst_idct_16x16_add_msa(int16_t *input, uint8_t *dst,
                                         int32_t dst_stride, int32_t eob)
{
    int32_t i;
    int16_t out[16 * 16];
    int16_t *out_ptr = &out[0];

    /* transform rows */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_iadst16_1d_columns_msa((input + (i << 3)), (out_ptr + (i << 7)));
    }

    /* transform columns */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)),
                                         (dst + (i << 3)), dst_stride);
    }
}

static void vp9_idct_iadst_16x16_add_msa(int16_t *input, uint8_t *dst,
                                         int32_t dst_stride, int32_t eob)
{
    int32_t i;
    int16_t out[16 * 16];
    int16_t *out_ptr = &out[0];

    /* transform rows */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_idct16_1d_columns_msa((input + (i << 3)), (out_ptr + (i << 7)));
    }

    /* transform columns */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
                                          (dst + (i << 3)), dst_stride);
    }
}

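/* Final stage of the 8x32 row pass: even and odd partial results are
 * combined with add/sub butterflies (the subtracted halves land in rows
 * 16-31 of tmp_buf in mirrored order), then the four 8x8 sub-blocks are
 * transposed into the 32-wide output buffer. */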
static void vp9_idct_butterfly_transpose_store(int16_t *tmp_buf,
                                               int16_t *tmp_eve_buf,
                                               int16_t *tmp_odd_buf,
                                               int16_t *dst)
{
    v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
    v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;

    /* FINAL BUTTERFLY : Dependency on Even & Odd */
    vec0 = LD_SH(tmp_odd_buf);
    vec1 = LD_SH(tmp_odd_buf + 9 * 8);
    vec2 = LD_SH(tmp_odd_buf + 14 * 8);
    vec3 = LD_SH(tmp_odd_buf + 6 * 8);
    loc0 = LD_SH(tmp_eve_buf);
    loc1 = LD_SH(tmp_eve_buf + 8 * 8);
    loc2 = LD_SH(tmp_eve_buf + 4 * 8);
    loc3 = LD_SH(tmp_eve_buf + 12 * 8);

    ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);

    ST_SH((loc0 - vec3), (tmp_buf + 31 * 8));
    ST_SH((loc1 - vec2), (tmp_buf + 23 * 8));
    ST_SH((loc2 - vec1), (tmp_buf + 27 * 8));
    ST_SH((loc3 - vec0), (tmp_buf + 19 * 8));

    /* Load 8 & Store 8 */
    vec0 = LD_SH(tmp_odd_buf + 4 * 8);
    vec1 = LD_SH(tmp_odd_buf + 13 * 8);
    vec2 = LD_SH(tmp_odd_buf + 10 * 8);
    vec3 = LD_SH(tmp_odd_buf + 3 * 8);
    loc0 = LD_SH(tmp_eve_buf + 2 * 8);
    loc1 = LD_SH(tmp_eve_buf + 10 * 8);
    loc2 = LD_SH(tmp_eve_buf + 6 * 8);
    loc3 = LD_SH(tmp_eve_buf + 14 * 8);

    ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);

    ST_SH((loc0 - vec3), (tmp_buf + 29 * 8));
    ST_SH((loc1 - vec2), (tmp_buf + 21 * 8));
    ST_SH((loc2 - vec1), (tmp_buf + 25 * 8));
    ST_SH((loc3 - vec0), (tmp_buf + 17 * 8));

    /* Load 8 & Store 8 */
    vec0 = LD_SH(tmp_odd_buf + 2 * 8);
    vec1 = LD_SH(tmp_odd_buf + 11 * 8);
    vec2 = LD_SH(tmp_odd_buf + 12 * 8);
    vec3 = LD_SH(tmp_odd_buf + 7 * 8);
    loc0 = LD_SH(tmp_eve_buf + 1 * 8);
    loc1 = LD_SH(tmp_eve_buf + 9 * 8);
    loc2 = LD_SH(tmp_eve_buf + 5 * 8);
    loc3 = LD_SH(tmp_eve_buf + 13 * 8);

    ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);

    ST_SH((loc0 - vec3), (tmp_buf + 30 * 8));
    ST_SH((loc1 - vec2), (tmp_buf + 22 * 8));
    ST_SH((loc2 - vec1), (tmp_buf + 26 * 8));
    ST_SH((loc3 - vec0), (tmp_buf + 18 * 8));

    /* Load 8 & Store 8 */
    vec0 = LD_SH(tmp_odd_buf + 5 * 8);
    vec1 = LD_SH(tmp_odd_buf + 15 * 8);
    vec2 = LD_SH(tmp_odd_buf + 8 * 8);
    vec3 = LD_SH(tmp_odd_buf + 1 * 8);
    loc0 = LD_SH(tmp_eve_buf + 3 * 8);
    loc1 = LD_SH(tmp_eve_buf + 11 * 8);
    loc2 = LD_SH(tmp_eve_buf + 7 * 8);
    loc3 = LD_SH(tmp_eve_buf + 15 * 8);

    ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);

    ST_SH((loc0 - vec3), (tmp_buf + 28 * 8));
    ST_SH((loc1 - vec2), (tmp_buf + 20 * 8));
    ST_SH((loc2 - vec1), (tmp_buf + 24 * 8));
    ST_SH((loc3 - vec0), (tmp_buf + 16 * 8));

    /* Transpose : 16 vectors */
    /* 1st & 2nd 8x8 */
    TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
                       m0, n0, m1, n1, m2, n2, m3, n3);
    ST_SH4(m0, n0, m1, n1, (dst + 0), 32);
    ST_SH4(m2, n2, m3, n3, (dst + 4 * 32), 32);

    TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
                       m4, n4, m5, n5, m6, n6, m7, n7);
    ST_SH4(m4, n4, m5, n5, (dst + 8), 32);
    ST_SH4(m6, n6, m7, n7, (dst + 8 + 4 * 32), 32);

    /* 3rd & 4th 8x8 */
    LD_SH8((tmp_buf + 8 * 16), 8, m0, n0, m1, n1, m2, n2, m3, n3);
    LD_SH8((tmp_buf + 12 * 16), 8, m4, n4, m5, n5, m6, n6, m7, n7);
    TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
                       m0, n0, m1, n1, m2, n2, m3, n3);
    ST_SH4(m0, n0, m1, n1, (dst + 16), 32);
    ST_SH4(m2, n2, m3, n3, (dst + 16 + 4 * 32), 32);

    TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
                       m4, n4, m5, n5, m6, n6, m7, n7);
    ST_SH4(m4, n4, m5, n5, (dst + 24), 32);
    ST_SH4(m6, n6, m7, n7, (dst + 24 + 4 * 32), 32);
}

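/* Even half of the 32-point 1-D IDCT: the sixteen even coefficient rows are
 * loaded in two batches of eight (stride 4 * 32), zeroed in place, and the
 * even stages leave sixteen partial results in tmp_eve_buf. */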
static void vp9_idct8x32_column_even_process_store(int16_t *tmp_buf,
                                                   int16_t *tmp_eve_buf)
{
    v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
    v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
    v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7;
    v8i16 zero = { 0 };

    /* Even stage 1 */
    LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, tmp_buf, (4 * 32));
    tmp_buf += (2 * 32);

    VP9_DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
    VP9_DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);
    BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0);
    VP9_DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);

    loc1 = vec3;
    loc0 = vec1;

    VP9_DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);
    VP9_DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
    BUTTERFLY_4(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0);
    BUTTERFLY_4(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4);
    BUTTERFLY_4(vec2, vec3, loc3, loc2, stp2, stp1, stp6, stp5);

    /* Even stage 2 */
    /* Load 8 */
    LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, tmp_buf, (4 * 32));

    VP9_DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7);
    VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3);
    VP9_DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5);
    VP9_DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1);

    vec0 = reg0 + reg4;
    reg0 = reg0 - reg4;
    reg4 = reg6 + reg2;
    reg6 = reg6 - reg2;
    reg2 = reg1 + reg5;
    reg1 = reg1 - reg5;
    reg5 = reg7 + reg3;
    reg7 = reg7 - reg3;
    reg3 = vec0;

    vec1 = reg2;
    reg2 = reg3 + reg4;
    reg3 = reg3 - reg4;
    reg4 = reg5 - vec1;
    reg5 = reg5 + vec1;

    VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
    VP9_DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1);

    vec0 = reg0 - reg6;
    reg0 = reg0 + reg6;
    vec1 = reg7 - reg1;
    reg7 = reg7 + reg1;

    VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);
    VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);

    /* Even stage 3 : Dependency on Even stage 1 & Even stage 2 */
    /* Store 8 */
    BUTTERFLY_4(stp0, stp1, reg7, reg5, loc1, loc3, loc2, loc0);
    ST_SH2(loc1, loc3, tmp_eve_buf, 8);
    ST_SH2(loc2, loc0, (tmp_eve_buf + 14 * 8), 8);

    BUTTERFLY_4(stp2, stp3, reg4, reg1, loc1, loc3, loc2, loc0);
    ST_SH2(loc1, loc3, (tmp_eve_buf + 2 * 8), 8);
    ST_SH2(loc2, loc0, (tmp_eve_buf + 12 * 8), 8);

    /* Store 8 */
    BUTTERFLY_4(stp4, stp5, reg6, reg3, loc1, loc3, loc2, loc0);
    ST_SH2(loc1, loc3, (tmp_eve_buf + 4 * 8), 8);
    ST_SH2(loc2, loc0, (tmp_eve_buf + 10 * 8), 8);

    BUTTERFLY_4(stp6, stp7, reg2, reg0, loc1, loc3, loc2, loc0);
    ST_SH2(loc1, loc3, (tmp_eve_buf + 6 * 8), 8);
    ST_SH2(loc2, loc0, (tmp_eve_buf + 8 * 8), 8);
}

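/* Odd half of the 32-point 1-D IDCT: the sixteen odd coefficient rows are
 * loaded, zeroed in place, and the three odd stages leave sixteen partial
 * results in tmp_odd_buf for the final butterfly. */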
static void vp9_idct8x32_column_odd_process_store(int16_t *tmp_buf,
                                                  int16_t *tmp_odd_buf)
{
    v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
    v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
    v8i16 zero = { 0 };

    /* Odd stage 1 */
    reg0 = LD_SH(tmp_buf + 32);
    reg1 = LD_SH(tmp_buf + 7 * 32);
    reg2 = LD_SH(tmp_buf + 9 * 32);
    reg3 = LD_SH(tmp_buf + 15 * 32);
    reg4 = LD_SH(tmp_buf + 17 * 32);
    reg5 = LD_SH(tmp_buf + 23 * 32);
    reg6 = LD_SH(tmp_buf + 25 * 32);
    reg7 = LD_SH(tmp_buf + 31 * 32);

    ST_SH(zero, tmp_buf + 32);
    ST_SH(zero, tmp_buf + 7 * 32);
    ST_SH(zero, tmp_buf + 9 * 32);
    ST_SH(zero, tmp_buf + 15 * 32);
    ST_SH(zero, tmp_buf + 17 * 32);
    ST_SH(zero, tmp_buf + 23 * 32);
    ST_SH(zero, tmp_buf + 25 * 32);
    ST_SH(zero, tmp_buf + 31 * 32);

    VP9_DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7);
    VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4);
    VP9_DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5);
    VP9_DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6);

    vec0 = reg0 + reg3;
    reg0 = reg0 - reg3;
    reg3 = reg7 + reg4;
    reg7 = reg7 - reg4;
    reg4 = reg1 + reg2;
    reg1 = reg1 - reg2;
    reg2 = reg6 + reg5;
    reg6 = reg6 - reg5;
    reg5 = vec0;

    /* 4 Stores */
    ADD2(reg5, reg4, reg3, reg2, vec0, vec1);
    ST_SH2(vec0, vec1, (tmp_odd_buf + 4 * 8), 8);
    SUB2(reg5, reg4, reg3, reg2, vec0, vec1);
    VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
    ST_SH2(vec0, vec1, tmp_odd_buf, 8);

    /* 4 Stores */
    VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7);
    VP9_DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6);
    BUTTERFLY_4(reg0, reg7, reg6, reg1, vec0, vec1, vec2, vec3);
    ST_SH2(vec0, vec1, (tmp_odd_buf + 6 * 8), 8);
    VP9_DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
    ST_SH2(vec2, vec3, (tmp_odd_buf + 2 * 8), 8);

    /* Odd stage 2 */
    /* 8 loads */
    reg0 = LD_SH(tmp_buf + 3 * 32);
    reg1 = LD_SH(tmp_buf + 5 * 32);
    reg2 = LD_SH(tmp_buf + 11 * 32);
    reg3 = LD_SH(tmp_buf + 13 * 32);
    reg4 = LD_SH(tmp_buf + 19 * 32);
    reg5 = LD_SH(tmp_buf + 21 * 32);
    reg6 = LD_SH(tmp_buf + 27 * 32);
    reg7 = LD_SH(tmp_buf + 29 * 32);

    ST_SH(zero, tmp_buf + 3 * 32);
    ST_SH(zero, tmp_buf + 5 * 32);
    ST_SH(zero, tmp_buf + 11 * 32);
    ST_SH(zero, tmp_buf + 13 * 32);
    ST_SH(zero, tmp_buf + 19 * 32);
    ST_SH(zero, tmp_buf + 21 * 32);
    ST_SH(zero, tmp_buf + 27 * 32);
    ST_SH(zero, tmp_buf + 29 * 32);

    VP9_DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6);
    VP9_DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5);
    VP9_DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4);
    VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7);

    /* 4 Stores */
    SUB4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4,
         vec0, vec1, vec2, vec3);
    VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
    VP9_DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);
    BUTTERFLY_4(loc2, loc3, loc1, loc0, vec0, vec1, vec3, vec2);
    ST_SH2(vec0, vec1, (tmp_odd_buf + 12 * 8), 3 * 8);
    VP9_DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
    ST_SH2(vec0, vec1, (tmp_odd_buf + 10 * 8), 8);

    /* 4 Stores */
    ADD4(reg0, reg3, reg1, reg2, reg5, reg6, reg4, reg7,
         vec0, vec1, vec2, vec3);
    BUTTERFLY_4(vec0, vec3, vec2, vec1, reg0, reg1, reg3, reg2);
    ST_SH2(reg0, reg1, (tmp_odd_buf + 13 * 8), 8);
    VP9_DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1);
    ST_SH2(reg0, reg1, (tmp_odd_buf + 8 * 8), 8);

    /* Odd stage 3 : Dependency on Odd stage 1 & Odd stage 2 */
    /* Load 8 & Store 8 */
    LD_SH4(tmp_odd_buf, 8, reg0, reg1, reg2, reg3);
    LD_SH4((tmp_odd_buf + 8 * 8), 8, reg4, reg5, reg6, reg7);

    ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
         loc0, loc1, loc2, loc3);
    ST_SH4(loc0, loc1, loc2, loc3, tmp_odd_buf, 8);

    SUB2(reg0, reg4, reg1, reg5, vec0, vec1);
    VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);

    SUB2(reg2, reg6, reg3, reg7, vec0, vec1);
    VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
    ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 8 * 8), 8);

    /* Load 8 & Store 8 */
    LD_SH4((tmp_odd_buf + 4 * 8), 8, reg1, reg2, reg0, reg3);
    LD_SH4((tmp_odd_buf + 12 * 8), 8, reg4, reg5, reg6, reg7);

    ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
         loc0, loc1, loc2, loc3);
    ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 4 * 8), 8);

    SUB2(reg0, reg4, reg3, reg7, vec0, vec1);
    VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);

    SUB2(reg1, reg5, reg2, reg6, vec0, vec1);
    VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
    ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 12 * 8), 8);
}

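/* Final stage of the 8x32 column pass: even and odd partial results are
 * merged with add/sub butterflies, rounded with a shift of 6 and added with
 * clipping to dst rows 0-15 and, in mirrored order, rows 16-31. */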
static void vp9_idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf,
                                                 int16_t *tmp_odd_buf,
                                                 uint8_t *dst,
                                                 int32_t dst_stride)
{
    v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
    v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;

    /* FINAL BUTTERFLY : Dependency on Even & Odd */
    vec0 = LD_SH(tmp_odd_buf);
    vec1 = LD_SH(tmp_odd_buf + 9 * 8);
    vec2 = LD_SH(tmp_odd_buf + 14 * 8);
    vec3 = LD_SH(tmp_odd_buf + 6 * 8);
    loc0 = LD_SH(tmp_eve_buf);
    loc1 = LD_SH(tmp_eve_buf + 8 * 8);
    loc2 = LD_SH(tmp_eve_buf + 4 * 8);
    loc3 = LD_SH(tmp_eve_buf + 12 * 8);

    ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);
    SRARI_H4_SH(m0, m2, m4, m6, 6);
    VP9_ADDBLK_ST8x4_UB(dst, (4 * dst_stride), m0, m2, m4, m6);

    SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m6, m2, m4, m0);
    SRARI_H4_SH(m0, m2, m4, m6, 6);
    VP9_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride),
                        m0, m2, m4, m6);

    /* Load 8 & Store 8 */
    vec0 = LD_SH(tmp_odd_buf + 4 * 8);
    vec1 = LD_SH(tmp_odd_buf + 13 * 8);
    vec2 = LD_SH(tmp_odd_buf + 10 * 8);
    vec3 = LD_SH(tmp_odd_buf + 3 * 8);
    loc0 = LD_SH(tmp_eve_buf + 2 * 8);
    loc1 = LD_SH(tmp_eve_buf + 10 * 8);
    loc2 = LD_SH(tmp_eve_buf + 6 * 8);
    loc3 = LD_SH(tmp_eve_buf + 14 * 8);

    ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);
    SRARI_H4_SH(m1, m3, m5, m7, 6);
    VP9_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride),
                        m1, m3, m5, m7);

    SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m7, m3, m5, m1);
    SRARI_H4_SH(m1, m3, m5, m7, 6);
    VP9_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride),
                        m1, m3, m5, m7);

    /* Load 8 & Store 8 */
    vec0 = LD_SH(tmp_odd_buf + 2 * 8);
    vec1 = LD_SH(tmp_odd_buf + 11 * 8);
    vec2 = LD_SH(tmp_odd_buf + 12 * 8);
    vec3 = LD_SH(tmp_odd_buf + 7 * 8);
    loc0 = LD_SH(tmp_eve_buf + 1 * 8);
    loc1 = LD_SH(tmp_eve_buf + 9 * 8);
    loc2 = LD_SH(tmp_eve_buf + 5 * 8);
    loc3 = LD_SH(tmp_eve_buf + 13 * 8);

    ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);
    SRARI_H4_SH(n0, n2, n4, n6, 6);
    VP9_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride),
                        n0, n2, n4, n6);

    SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n6, n2, n4, n0);
    SRARI_H4_SH(n0, n2, n4, n6, 6);
    VP9_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride),
                        n0, n2, n4, n6);

    /* Load 8 & Store 8 */
    vec0 = LD_SH(tmp_odd_buf + 5 * 8);
    vec1 = LD_SH(tmp_odd_buf + 15 * 8);
    vec2 = LD_SH(tmp_odd_buf + 8 * 8);
    vec3 = LD_SH(tmp_odd_buf + 1 * 8);
    loc0 = LD_SH(tmp_eve_buf + 3 * 8);
    loc1 = LD_SH(tmp_eve_buf + 11 * 8);
    loc2 = LD_SH(tmp_eve_buf + 7 * 8);
    loc3 = LD_SH(tmp_eve_buf + 15 * 8);

    ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);
    SRARI_H4_SH(n1, n3, n5, n7, 6);
    VP9_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * dst_stride),
                        n1, n3, n5, n7);

    SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n7, n3, n5, n1);
    SRARI_H4_SH(n1, n3, n5, n7, 6);
    VP9_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride),
                        n1, n3, n5, n7);
}

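/* The two drivers below chain the even and odd passes: the addblk variant
 * reconstructs eight pixel columns directly, while the plain variant emits a
 * transposed 8x32 strip (via the tmp_buf scratch area) for a later column
 * pass. */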
static void vp9_idct8x32_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
                                               int32_t dst_stride)
{
    int16_t tmp_odd_buf[16 * 8] ALLOC_ALIGNED(ALIGNMENT);
    int16_t tmp_eve_buf[16 * 8] ALLOC_ALIGNED(ALIGNMENT);

    vp9_idct8x32_column_even_process_store(input, &tmp_eve_buf[0]);
    vp9_idct8x32_column_odd_process_store(input, &tmp_odd_buf[0]);
    vp9_idct8x32_column_butterfly_addblk(&tmp_eve_buf[0], &tmp_odd_buf[0],
                                         dst, dst_stride);
}

static void vp9_idct8x32_1d_columns_msa(int16_t *input, int16_t *output,
                                        int16_t *tmp_buf)
{
    int16_t tmp_odd_buf[16 * 8] ALLOC_ALIGNED(ALIGNMENT);
    int16_t tmp_eve_buf[16 * 8] ALLOC_ALIGNED(ALIGNMENT);

    vp9_idct8x32_column_even_process_store(input, &tmp_eve_buf[0]);
    vp9_idct8x32_column_odd_process_store(input, &tmp_odd_buf[0]);
    vp9_idct_butterfly_transpose_store(tmp_buf, &tmp_eve_buf[0],
                                       &tmp_odd_buf[0], output);
}

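/* DC-only 32x32 path: the same two cospi_16_64 multiplications as the 16x16
 * DC case; the offset is added to each row as two 16-byte vectors. */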
static void vp9_idct32x32_1_add_msa(int16_t *input, uint8_t *dst,
                                    int32_t dst_stride)
{
    int32_t i;
    int16_t out;
    v16u8 dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3;
    v8i16 res0, res1, res2, res3, res4, res5, res6, res7, vec;

    out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS);
    out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS);
    out = ROUND_POWER_OF_TWO(out, 6);
    input[0] = 0;

    vec = __msa_fill_h(out);

    for (i = 16; i--;) {
        LD_UB2(dst, 16, dst0, dst1);
        LD_UB2(dst + dst_stride, 16, dst2, dst3);

        UNPCK_UB_SH(dst0, res0, res4);
        UNPCK_UB_SH(dst1, res1, res5);
        UNPCK_UB_SH(dst2, res2, res6);
        UNPCK_UB_SH(dst3, res3, res7);
        ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2,
             res3);
        ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6,
             res7);
        CLIP_SH8_0_255(res0, res1, res2, res3, res4, res5, res6, res7);
        PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3,
                    tmp0, tmp1, tmp2, tmp3);

        ST_UB2(tmp0, tmp1, dst, 16);
        dst += dst_stride;
        ST_UB2(tmp2, tmp3, dst, 16);
        dst += dst_stride;
    }
}

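/* Partial 32x32 path for eob <= 34: the nonzero coefficients all lie in the
 * top-left 8x8 corner, so a single 8x32 row pass suffices. It fills only the
 * first eight rows of the 32x32 intermediate, so the buffer is cleared up
 * front with inline word stores before the four column passes consume it. */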
static void vp9_idct32x32_34_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                               int32_t dst_stride)
{
    int32_t i;
    int16_t out_arr[32 * 32] ALLOC_ALIGNED(ALIGNMENT);
    int16_t *out_ptr = out_arr;
    int16_t tmp_buf[8 * 32] ALLOC_ALIGNED(ALIGNMENT);

    for (i = 32; i--;) {
        __asm__ volatile (
            "sw     $zero,       (%[out_ptr])     \n\t"
            "sw     $zero,      4(%[out_ptr])     \n\t"
            "sw     $zero,      8(%[out_ptr])     \n\t"
            "sw     $zero,     12(%[out_ptr])     \n\t"
            "sw     $zero,     16(%[out_ptr])     \n\t"
            "sw     $zero,     20(%[out_ptr])     \n\t"
            "sw     $zero,     24(%[out_ptr])     \n\t"
            "sw     $zero,     28(%[out_ptr])     \n\t"
            "sw     $zero,     32(%[out_ptr])     \n\t"
            "sw     $zero,     36(%[out_ptr])     \n\t"
            "sw     $zero,     40(%[out_ptr])     \n\t"
            "sw     $zero,     44(%[out_ptr])     \n\t"
            "sw     $zero,     48(%[out_ptr])     \n\t"
            "sw     $zero,     52(%[out_ptr])     \n\t"
            "sw     $zero,     56(%[out_ptr])     \n\t"
            "sw     $zero,     60(%[out_ptr])     \n\t"

            :
            : [out_ptr] "r" (out_ptr)
        );

        out_ptr += 32;
    }

    out_ptr = out_arr;

    /* transform rows: process the nonzero 8*32 block */
    vp9_idct8x32_1d_columns_msa(input, out_ptr, &tmp_buf[0]);

    /* transform columns */
    for (i = 0; i < 4; i++) {
        /* process 8*32 block */
        vp9_idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)),
                                           (dst + (i << 3)), dst_stride);
    }
}

static void vp9_idct32x32_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                            int32_t dst_stride)
{
    int32_t i;
    int16_t out_arr[32 * 32] ALLOC_ALIGNED(ALIGNMENT);
    int16_t *out_ptr = out_arr;
    int16_t tmp_buf[8 * 32] ALLOC_ALIGNED(ALIGNMENT);

    /* transform rows */
    for (i = 0; i < 4; i++) {
        /* process 8*32 block */
        vp9_idct8x32_1d_columns_msa((input + (i << 3)), (out_ptr + (i << 8)),
                                    &tmp_buf[0]);
    }

    /* transform columns */
    for (i = 0; i < 4; i++) {
        /* process 8*32 block */
        vp9_idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)),
                                           (dst + (i << 3)), dst_stride);
    }
}

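/* Exported entry points: dispatch on eob (the number of nonzero
 * coefficients) to the DC-only, partial or full transform of each size; the
 * IADST and hybrid variants always run the full transform. */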
void ff_idct_idct_4x4_add_msa(uint8_t *dst, ptrdiff_t stride,
                              int16_t *block, int eob)
{
    if (eob > 1) {
        vp9_idct4x4_colcol_addblk_msa(block, dst, stride);
    } else {
        vp9_idct4x4_1_add_msa(block, dst, stride);
    }
}

void ff_idct_idct_8x8_add_msa(uint8_t *dst, ptrdiff_t stride,
                              int16_t *block, int eob)
{
    if (eob == 1) {
        vp9_idct8x8_1_add_msa(block, dst, stride);
    } else if (eob <= 12) {
        vp9_idct8x8_12_colcol_addblk_msa(block, dst, stride);
    } else {
        vp9_idct8x8_colcol_addblk_msa(block, dst, stride);
    }
}

void ff_idct_idct_16x16_add_msa(uint8_t *dst, ptrdiff_t stride,
                                int16_t *block, int eob)
{
    if (eob == 1) {
        /* DC only DCT coefficient. */
        vp9_idct16x16_1_add_msa(block, dst, stride);
    } else if (eob <= 10) {
        vp9_idct16x16_10_colcol_addblk_msa(block, dst, stride);
    } else {
        vp9_idct16x16_colcol_addblk_msa(block, dst, stride);
    }
}

void ff_idct_idct_32x32_add_msa(uint8_t *dst, ptrdiff_t stride,
                                int16_t *block, int eob)
{
    if (eob == 1) {
        vp9_idct32x32_1_add_msa(block, dst, stride);
    } else if (eob <= 34) {
        vp9_idct32x32_34_colcol_addblk_msa(block, dst, stride);
    } else {
        vp9_idct32x32_colcol_addblk_msa(block, dst, stride);
    }
}

void ff_iadst_iadst_4x4_add_msa(uint8_t *dst, ptrdiff_t stride,
                                int16_t *block, int eob)
{
    vp9_iadst4x4_colcol_addblk_msa(block, dst, stride);
}

void ff_iadst_iadst_8x8_add_msa(uint8_t *dst, ptrdiff_t stride,
                                int16_t *block, int eob)
{
    vp9_iadst8x8_colcol_addblk_msa(block, dst, stride);
}

void ff_iadst_iadst_16x16_add_msa(uint8_t *dst, ptrdiff_t stride,
                                  int16_t *block, int eob)
{
    vp9_iadst16x16_colcol_addblk_msa(block, dst, stride);
}

void ff_idct_iadst_4x4_add_msa(uint8_t *dst, ptrdiff_t stride,
                               int16_t *block, int eob)
{
    vp9_idct_iadst_4x4_add_msa(block, dst, stride, eob);
}

void ff_idct_iadst_8x8_add_msa(uint8_t *dst, ptrdiff_t stride,
                               int16_t *block, int eob)
{
    vp9_idct_iadst_8x8_add_msa(block, dst, stride, eob);
}

void ff_idct_iadst_16x16_add_msa(uint8_t *dst, ptrdiff_t stride,
                                 int16_t *block, int eob)
{
    vp9_idct_iadst_16x16_add_msa(block, dst, stride, eob);
}

void ff_iadst_idct_4x4_add_msa(uint8_t *dst, ptrdiff_t stride,
                               int16_t *block, int eob)
{
    vp9_iadst_idct_4x4_add_msa(block, dst, stride, eob);
}

void ff_iadst_idct_8x8_add_msa(uint8_t *dst, ptrdiff_t stride,
                               int16_t *block, int eob)
{
    vp9_iadst_idct_8x8_add_msa(block, dst, stride, eob);
}

void ff_iadst_idct_16x16_add_msa(uint8_t *dst, ptrdiff_t stride,
                                 int16_t *block, int eob)
{
    vp9_iadst_idct_16x16_add_msa(block, dst, stride, eob);
}