/*
 *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <stdio.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/mips/convolve_common_dspr2.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_dsp/vpx_filter.h"
#include "vpx_ports/mem.h"

#if HAVE_DSPR2
static void convolve_vert_4_dspr2(const uint8_t *src, int32_t src_stride,
                                  uint8_t *dst, int32_t dst_stride,
                                  const int16_t *filter_y, int32_t w,
                                  int32_t h) {
  int32_t x, y;
  const uint8_t *src_ptr;
  uint8_t *dst_ptr;
  uint8_t *cm = vpx_ff_cropTbl;
  uint32_t vector4a = 64;
  uint32_t load1, load2, load3, load4;
  uint32_t p1, p2;
  uint32_t n1, n2;
  uint32_t scratch1, scratch2;
  uint32_t store1, store2;
  int32_t vector1b, vector2b, vector3b, vector4b;
  int32_t Temp1, Temp2;

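  /* The eight 16-bit taps are read as four 32-bit words; each word packs
     two adjacent taps, matching the paired multiplies done by dpa.w.ph. */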
  vector1b = ((const int32_t *)filter_y)[0];
  vector2b = ((const int32_t *)filter_y)[1];
  vector3b = ((const int32_t *)filter_y)[2];
  vector4b = ((const int32_t *)filter_y)[3];

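  /* Back the source up three rows so taps 0..7 cover rows y-3 .. y+4. */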
  src -= 3 * src_stride;

  for (y = h; y--;) {
    /* prefetch data to cache memory */
    prefetch_store(dst + dst_stride);

    for (x = 0; x < w; x += 4) {
      src_ptr = src + x;
      dst_ptr = dst + x;

      __asm__ __volatile__(
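          /* rows 0-3 of the 8-tap window; ulw tolerates unaligned source */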
          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
          "ulw              %[load4],     0(%[src_ptr])                   \n\t"

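          /* seed each accumulator LO with the rounding constant, clear HI */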
          "mtlo             %[vector4a],  $ac0                            \n\t"
          "mtlo             %[vector4a],  $ac1                            \n\t"
          "mtlo             %[vector4a],  $ac2                            \n\t"
          "mtlo             %[vector4a],  $ac3                            \n\t"
          "mthi             $zero,        $ac0                            \n\t"
          "mthi             $zero,        $ac1                            \n\t"
          "mthi             $zero,        $ac2                            \n\t"
          "mthi             $zero,        $ac3                            \n\t"

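          /* pair vertically adjacent rows from the low byte halves:
             p1/p2 hold column 0, n1/n2 hold column 1 */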
          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */

          "dpa.w.ph         $ac0,         %[p1],          %[vector1b]     \n\t"
          "dpa.w.ph         $ac0,         %[p2],          %[vector2b]     \n\t"
          "dpa.w.ph         $ac1,         %[n1],          %[vector1b]     \n\t"
          "dpa.w.ph         $ac1,         %[n2],          %[vector2b]     \n\t"

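          /* the same pairing from the high byte halves: columns 2 and 3 */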
          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */

          "dpa.w.ph         $ac2,         %[p1],          %[vector1b]     \n\t"
          "dpa.w.ph         $ac2,         %[p2],          %[vector2b]     \n\t"
          "dpa.w.ph         $ac3,         %[n1],          %[vector1b]     \n\t"
          "dpa.w.ph         $ac3,         %[n2],          %[vector2b]     \n\t"

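          /* rows 4-7 of the window feed the remaining four taps */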
          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
          "ulw              %[load4],     0(%[src_ptr])                   \n\t"

          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */

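          /* accumulate taps 4-7 and extract the rounded result; extp uses
             the DSPControl position the caller set with wrdsp */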
          "dpa.w.ph         $ac0,         %[p1],          %[vector3b]     \n\t"
          "dpa.w.ph         $ac0,         %[p2],          %[vector4b]     \n\t"
          "extp             %[Temp1],     $ac0,           31              \n\t"
          "dpa.w.ph         $ac1,         %[n1],          %[vector3b]     \n\t"
          "dpa.w.ph         $ac1,         %[n2],          %[vector4b]     \n\t"
          "extp             %[Temp2],     $ac1,           31              \n\t"

          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */

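          /* lbux indexes the crop table to clamp each result to [0, 255] */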
          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
          "dpa.w.ph         $ac2,         %[p1],          %[vector3b]     \n\t"
          "dpa.w.ph         $ac2,         %[p2],          %[vector4b]     \n\t"
          "extp             %[Temp1],     $ac2,           31              \n\t"

          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
          "dpa.w.ph         $ac3,         %[n1],          %[vector3b]     \n\t"
          "dpa.w.ph         $ac3,         %[n2],          %[vector4b]     \n\t"
          "extp             %[Temp2],     $ac3,           31              \n\t"

          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
          "sb               %[store2],    1(%[dst_ptr])                   \n\t"

          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"

          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
          "sb               %[store2],    3(%[dst_ptr])                   \n\t"

          : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
            [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),
            [n1] "=&r"(n1), [n2] "=&r"(n2), [scratch1] "=&r"(scratch1),
            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
          : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
            [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
            [vector4a] "r"(vector4a), [src_stride] "r"(src_stride),
            [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
    }

    /* Next row... */
    src += src_stride;
    dst += dst_stride;
  }
}
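
/* For reference, a plain-C sketch of what the assembly above computes for
 * each output pixel (illustrative only; clip_pixel() from vpx_dsp_common.h
 * stands in for the cm[] crop-table lookup):
 *
 *   int sum = 64;                          // rounding term, 1 << (7 - 1)
 *   for (k = 0; k < 8; ++k)                // src already points 3 rows up
 *     sum += src[k * src_stride + x] * filter_y[k];
 *   dst[x] = clip_pixel(sum >> 7);         // FILTER_BITS == 7
 */

/* Identical filtering pipeline to convolve_vert_4_dspr2 (see the comments
 * there), specialized for a fixed width of 64 so both halves of each
 * destination row can be prefetched. */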

static void convolve_vert_64_dspr2(const uint8_t *src, int32_t src_stride,
                                   uint8_t *dst, int32_t dst_stride,
                                   const int16_t *filter_y, int32_t h) {
  int32_t x, y;
  const uint8_t *src_ptr;
  uint8_t *dst_ptr;
  uint8_t *cm = vpx_ff_cropTbl;
  uint32_t vector4a = 64;
  uint32_t load1, load2, load3, load4;
  uint32_t p1, p2;
  uint32_t n1, n2;
  uint32_t scratch1, scratch2;
  uint32_t store1, store2;
  int32_t vector1b, vector2b, vector3b, vector4b;
  int32_t Temp1, Temp2;

  vector1b = ((const int32_t *)filter_y)[0];
  vector2b = ((const int32_t *)filter_y)[1];
  vector3b = ((const int32_t *)filter_y)[2];
  vector4b = ((const int32_t *)filter_y)[3];

  src -= 3 * src_stride;

  for (y = h; y--;) {
    /* prefetch data to cache memory */
    prefetch_store(dst + dst_stride);
    prefetch_store(dst + dst_stride + 32);

    for (x = 0; x < 64; x += 4) {
      src_ptr = src + x;
      dst_ptr = dst + x;

      __asm__ __volatile__(
          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
          "ulw              %[load4],     0(%[src_ptr])                   \n\t"

          "mtlo             %[vector4a],  $ac0                            \n\t"
          "mtlo             %[vector4a],  $ac1                            \n\t"
          "mtlo             %[vector4a],  $ac2                            \n\t"
          "mtlo             %[vector4a],  $ac3                            \n\t"
          "mthi             $zero,        $ac0                            \n\t"
          "mthi             $zero,        $ac1                            \n\t"
          "mthi             $zero,        $ac2                            \n\t"
          "mthi             $zero,        $ac3                            \n\t"

          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */

          "dpa.w.ph         $ac0,         %[p1],          %[vector1b]     \n\t"
          "dpa.w.ph         $ac0,         %[p2],          %[vector2b]     \n\t"
          "dpa.w.ph         $ac1,         %[n1],          %[vector1b]     \n\t"
          "dpa.w.ph         $ac1,         %[n2],          %[vector2b]     \n\t"

          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */

          "dpa.w.ph         $ac2,         %[p1],          %[vector1b]     \n\t"
          "dpa.w.ph         $ac2,         %[p2],          %[vector2b]     \n\t"
          "dpa.w.ph         $ac3,         %[n1],          %[vector1b]     \n\t"
          "dpa.w.ph         $ac3,         %[n2],          %[vector2b]     \n\t"

          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
          "ulw              %[load4],     0(%[src_ptr])                   \n\t"

          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */

          "dpa.w.ph         $ac0,         %[p1],          %[vector3b]     \n\t"
          "dpa.w.ph         $ac0,         %[p2],          %[vector4b]     \n\t"
          "extp             %[Temp1],     $ac0,           31              \n\t"
          "dpa.w.ph         $ac1,         %[n1],          %[vector3b]     \n\t"
          "dpa.w.ph         $ac1,         %[n2],          %[vector4b]     \n\t"
          "extp             %[Temp2],     $ac1,           31              \n\t"

          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */

          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
          "dpa.w.ph         $ac2,         %[p1],          %[vector3b]     \n\t"
          "dpa.w.ph         $ac2,         %[p2],          %[vector4b]     \n\t"
          "extp             %[Temp1],     $ac2,           31              \n\t"

          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
          "dpa.w.ph         $ac3,         %[n1],          %[vector3b]     \n\t"
          "dpa.w.ph         $ac3,         %[n2],          %[vector4b]     \n\t"
          "extp             %[Temp2],     $ac3,           31              \n\t"

          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
          "sb               %[store2],    1(%[dst_ptr])                   \n\t"

          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"

          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
          "sb               %[store2],    3(%[dst_ptr])                   \n\t"

          : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
            [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),
            [n1] "=&r"(n1), [n2] "=&r"(n2), [scratch1] "=&r"(scratch1),
            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
          : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
            [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
            [vector4a] "r"(vector4a), [src_stride] "r"(src_stride),
            [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
    }

    /* Next row... */
    src += src_stride;
    dst += dst_stride;
  }
}

void vpx_convolve8_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                              uint8_t *dst, ptrdiff_t dst_stride,
                              const InterpKernel *filter, int x0_q4,
                              int x_step_q4, int y0_q4, int y_step_q4, int w,
                              int h) {
  const int16_t *const filter_y = filter[y0_q4];
  assert(y_step_q4 == 16);
  assert(((const int32_t *)filter_y)[1] != 0x800000);

  if (vpx_get_filter_taps(filter_y) == 2) {
    vpx_convolve2_vert_dspr2(src, src_stride, dst, dst_stride, filter, x0_q4,
                             x_step_q4, y0_q4, y_step_q4, w, h);
  } else {
    uint32_t pos = 38;

    /* bit position for extp to extract from the accumulators */
    __asm__ __volatile__("wrdsp      %[pos],     1           \n\t"
                         :
                         : [pos] "r"(pos));

    prefetch_store(dst);

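    /* Widths 4-32 share the generic column loop; 64 gets the variant with
       extra prefetching; anything else falls back to the C version. */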
    switch (w) {
      case 4:
      case 8:
      case 16:
      case 32:
        convolve_vert_4_dspr2(src, src_stride, dst, dst_stride, filter_y, w, h);
        break;
      case 64:
        prefetch_store(dst + 32);
        convolve_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y, h);
        break;
      default:
        vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter, x0_q4,
                             x_step_q4, y0_q4, y_step_q4, w, h);
        break;
    }
  }
}
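
/* Usage sketch (hypothetical values; "kernels" stands for whatever
 * InterpKernel table the caller selected, and y0_q4 picks the subpel
 * phase). Step values of 16 mean no scaling, which the function asserts:
 *
 *   vpx_convolve8_vert_dspr2(src, src_stride, dst, dst_stride, kernels,
 *                            0, 16, y0_q4, 16, 32, 32);
 */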

#endif