/*
 * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <stdio.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/mips/convolve_common_dspr2.h"
#include "vpx_dsp/vpx_convolve.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_ports/mem.h"

#if HAVE_DSPR2
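/* 8-tap vertical convolution, averaged with the destination, for widths that
   are multiples of 4. Each inner-loop iteration produces four output pixels:
   the four DSP accumulators are seeded with the rounding constant 64
   (1 << 6, for the final shift right by 7), the eight taps are accumulated
   with dpa.w.ph, and the clamped results (via the crop table cm) are combined
   with the existing dst bytes using the rounding average addqh_r.w. */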
static void convolve_avg_vert_4_dspr2(const uint8_t *src, int32_t src_stride,
                                      uint8_t *dst, int32_t dst_stride,
                                      const int16_t *filter_y, int32_t w,
                                      int32_t h) {
  int32_t x, y;
  const uint8_t *src_ptr;
  uint8_t *dst_ptr;
  uint8_t *cm = vpx_ff_cropTbl;
  uint32_t vector4a = 64;
  uint32_t load1, load2, load3, load4;
  uint32_t p1, p2;
  uint32_t n1, n2;
  uint32_t scratch1, scratch2;
  uint32_t store1, store2;
  int32_t vector1b, vector2b, vector3b, vector4b;
  int32_t Temp1, Temp2;

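  /* Treat the eight 16-bit taps as four packed halfword pairs so that each
     dpa.w.ph below multiply-accumulates two taps per instruction. */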
  vector1b = ((const int32_t *)filter_y)[0];
  vector2b = ((const int32_t *)filter_y)[1];
  vector3b = ((const int32_t *)filter_y)[2];
  vector4b = ((const int32_t *)filter_y)[3];

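  /* Back up three rows so the 8-tap window covers rows -3 .. +4 around each
     output row. */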
  src -= 3 * src_stride;

  for (y = h; y--;) {
    /* prefetch data to cache memory */
    prefetch_store(dst + dst_stride);

    for (x = 0; x < w; x += 4) {
      src_ptr = src + x;
      dst_ptr = dst + x;

      __asm__ __volatile__(
          "ulw            %[load1], 0(%[src_ptr])               \n\t"
          "add            %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw            %[load2], 0(%[src_ptr])               \n\t"
          "add            %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw            %[load3], 0(%[src_ptr])               \n\t"
          "add            %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw            %[load4], 0(%[src_ptr])               \n\t"

          "mtlo           %[vector4a], $ac0                     \n\t"
          "mtlo           %[vector4a], $ac1                     \n\t"
          "mtlo           %[vector4a], $ac2                     \n\t"
          "mtlo           %[vector4a], $ac3                     \n\t"
          "mthi           $zero, $ac0                           \n\t"
          "mthi           $zero, $ac1                           \n\t"
          "mthi           $zero, $ac2                           \n\t"
          "mthi           $zero, $ac3                           \n\t"

          "preceu.ph.qbr  %[scratch1], %[load1]                 \n\t"
          "preceu.ph.qbr  %[p1], %[load2]                       \n\t"
          "precrq.ph.w    %[n1], %[p1], %[scratch1]             \n\t" /* pixel 2 */
          "append         %[p1], %[scratch1], 16                \n\t" /* pixel 1 */
          "preceu.ph.qbr  %[scratch2], %[load3]                 \n\t"
          "preceu.ph.qbr  %[p2], %[load4]                       \n\t"
          "precrq.ph.w    %[n2], %[p2], %[scratch2]             \n\t" /* pixel 2 */
          "append         %[p2], %[scratch2], 16                \n\t" /* pixel 1 */

          "dpa.w.ph       $ac0, %[p1], %[vector1b]              \n\t"
          "dpa.w.ph       $ac0, %[p2], %[vector2b]              \n\t"
          "dpa.w.ph       $ac1, %[n1], %[vector1b]              \n\t"
          "dpa.w.ph       $ac1, %[n2], %[vector2b]              \n\t"

          "preceu.ph.qbl  %[scratch1], %[load1]                 \n\t"
          "preceu.ph.qbl  %[p1], %[load2]                       \n\t"
          "precrq.ph.w    %[n1], %[p1], %[scratch1]             \n\t" /* pixel 2 */
          "append         %[p1], %[scratch1], 16                \n\t" /* pixel 1 */
          "preceu.ph.qbl  %[scratch2], %[load3]                 \n\t"
          "preceu.ph.qbl  %[p2], %[load4]                       \n\t"
          "precrq.ph.w    %[n2], %[p2], %[scratch2]             \n\t" /* pixel 2 */
          "append         %[p2], %[scratch2], 16                \n\t" /* pixel 1 */

          "dpa.w.ph       $ac2, %[p1], %[vector1b]              \n\t"
          "dpa.w.ph       $ac2, %[p2], %[vector2b]              \n\t"
          "dpa.w.ph       $ac3, %[n1], %[vector1b]              \n\t"
          "dpa.w.ph       $ac3, %[n2], %[vector2b]              \n\t"

          "add            %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw            %[load1], 0(%[src_ptr])               \n\t"
          "add            %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw            %[load2], 0(%[src_ptr])               \n\t"
          "add            %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw            %[load3], 0(%[src_ptr])               \n\t"
          "add            %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw            %[load4], 0(%[src_ptr])               \n\t"

          "preceu.ph.qbr  %[scratch1], %[load1]                 \n\t"
          "preceu.ph.qbr  %[p1], %[load2]                       \n\t"
          "precrq.ph.w    %[n1], %[p1], %[scratch1]             \n\t" /* pixel 2 */
          "append         %[p1], %[scratch1], 16                \n\t" /* pixel 1 */
          "preceu.ph.qbr  %[scratch2], %[load3]                 \n\t"
          "preceu.ph.qbr  %[p2], %[load4]                       \n\t"
          "precrq.ph.w    %[n2], %[p2], %[scratch2]             \n\t" /* pixel 2 */
          "append         %[p2], %[scratch2], 16                \n\t" /* pixel 1 */

          "dpa.w.ph       $ac0, %[p1], %[vector3b]              \n\t"
          "dpa.w.ph       $ac0, %[p2], %[vector4b]              \n\t"
          "extp           %[Temp1], $ac0, 31                    \n\t"
          "dpa.w.ph       $ac1, %[n1], %[vector3b]              \n\t"
          "dpa.w.ph       $ac1, %[n2], %[vector4b]              \n\t"
          "extp           %[Temp2], $ac1, 31                    \n\t"

          "preceu.ph.qbl  %[scratch1], %[load1]                 \n\t"
          "preceu.ph.qbl  %[p1], %[load2]                       \n\t"
          "precrq.ph.w    %[n1], %[p1], %[scratch1]             \n\t" /* pixel 2 */
          "append         %[p1], %[scratch1], 16                \n\t" /* pixel 1 */
          "lbu            %[scratch1], 0(%[dst_ptr])            \n\t"
          "preceu.ph.qbl  %[scratch2], %[load3]                 \n\t"
          "preceu.ph.qbl  %[p2], %[load4]                       \n\t"
          "precrq.ph.w    %[n2], %[p2], %[scratch2]             \n\t" /* pixel 2 */
          "append         %[p2], %[scratch2], 16                \n\t" /* pixel 1 */
          "lbu            %[scratch2], 1(%[dst_ptr])            \n\t"

          "lbux           %[store1], %[Temp1](%[cm])            \n\t"
          "dpa.w.ph       $ac2, %[p1], %[vector3b]              \n\t"
          "dpa.w.ph       $ac2, %[p2], %[vector4b]              \n\t"
          "addqh_r.w      %[store1], %[store1], %[scratch1]     \n\t" /* pixel 1 */
          "extp           %[Temp1], $ac2, 31                    \n\t"

          "lbux           %[store2], %[Temp2](%[cm])            \n\t"
          "dpa.w.ph       $ac3, %[n1], %[vector3b]              \n\t"
          "dpa.w.ph       $ac3, %[n2], %[vector4b]              \n\t"
          "addqh_r.w      %[store2], %[store2], %[scratch2]     \n\t" /* pixel 2 */
          "extp           %[Temp2], $ac3, 31                    \n\t"
          "lbu            %[scratch1], 2(%[dst_ptr])            \n\t"

          "sb             %[store1], 0(%[dst_ptr])              \n\t"
          "sb             %[store2], 1(%[dst_ptr])              \n\t"
          "lbu            %[scratch2], 3(%[dst_ptr])            \n\t"

          "lbux           %[store1], %[Temp1](%[cm])            \n\t"
          "lbux           %[store2], %[Temp2](%[cm])            \n\t"
          "addqh_r.w      %[store1], %[store1], %[scratch1]     \n\t" /* pixel 3 */
          "addqh_r.w      %[store2], %[store2], %[scratch2]     \n\t" /* pixel 4 */

          "sb             %[store1], 2(%[dst_ptr])              \n\t"
          "sb             %[store2], 3(%[dst_ptr])              \n\t"

          : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
            [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),
            [n1] "=&r"(n1), [n2] "=&r"(n2), [scratch1] "=&r"(scratch1),
            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
          : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
            [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
            [vector4a] "r"(vector4a), [src_stride] "r"(src_stride),
            [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
    }

    /* Next row... */
    src += src_stride;
    dst += dst_stride;
  }
}

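/* Identical filter-and-average kernel to convolve_avg_vert_4_dspr2, but with
   the width fixed at 64 so the inner loop bound is a constant and a second
   dst cache line can be prefetched per row. */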
static void convolve_avg_vert_64_dspr2(const uint8_t *src, int32_t src_stride,
                                       uint8_t *dst, int32_t dst_stride,
                                       const int16_t *filter_y, int32_t h) {
  int32_t x, y;
  const uint8_t *src_ptr;
  uint8_t *dst_ptr;
  uint8_t *cm = vpx_ff_cropTbl;
  uint32_t vector4a = 64;
  uint32_t load1, load2, load3, load4;
  uint32_t p1, p2;
  uint32_t n1, n2;
  uint32_t scratch1, scratch2;
  uint32_t store1, store2;
  int32_t vector1b, vector2b, vector3b, vector4b;
  int32_t Temp1, Temp2;

  vector1b = ((const int32_t *)filter_y)[0];
  vector2b = ((const int32_t *)filter_y)[1];
  vector3b = ((const int32_t *)filter_y)[2];
  vector4b = ((const int32_t *)filter_y)[3];

  src -= 3 * src_stride;

  for (y = h; y--;) {
    /* prefetch data to cache memory */
    prefetch_store(dst + dst_stride);
    prefetch_store(dst + dst_stride + 32);

    for (x = 0; x < 64; x += 4) {
      src_ptr = src + x;
      dst_ptr = dst + x;

      __asm__ __volatile__(
          "ulw            %[load1], 0(%[src_ptr])               \n\t"
          "add            %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw            %[load2], 0(%[src_ptr])               \n\t"
          "add            %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw            %[load3], 0(%[src_ptr])               \n\t"
          "add            %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw            %[load4], 0(%[src_ptr])               \n\t"

          "mtlo           %[vector4a], $ac0                     \n\t"
          "mtlo           %[vector4a], $ac1                     \n\t"
          "mtlo           %[vector4a], $ac2                     \n\t"
          "mtlo           %[vector4a], $ac3                     \n\t"
          "mthi           $zero, $ac0                           \n\t"
          "mthi           $zero, $ac1                           \n\t"
          "mthi           $zero, $ac2                           \n\t"
          "mthi           $zero, $ac3                           \n\t"

          "preceu.ph.qbr  %[scratch1], %[load1]                 \n\t"
          "preceu.ph.qbr  %[p1], %[load2]                       \n\t"
          "precrq.ph.w    %[n1], %[p1], %[scratch1]             \n\t" /* pixel 2 */
          "append         %[p1], %[scratch1], 16                \n\t" /* pixel 1 */
          "preceu.ph.qbr  %[scratch2], %[load3]                 \n\t"
          "preceu.ph.qbr  %[p2], %[load4]                       \n\t"
          "precrq.ph.w    %[n2], %[p2], %[scratch2]             \n\t" /* pixel 2 */
          "append         %[p2], %[scratch2], 16                \n\t" /* pixel 1 */

          "dpa.w.ph       $ac0, %[p1], %[vector1b]              \n\t"
          "dpa.w.ph       $ac0, %[p2], %[vector2b]              \n\t"
          "dpa.w.ph       $ac1, %[n1], %[vector1b]              \n\t"
          "dpa.w.ph       $ac1, %[n2], %[vector2b]              \n\t"

          "preceu.ph.qbl  %[scratch1], %[load1]                 \n\t"
          "preceu.ph.qbl  %[p1], %[load2]                       \n\t"
          "precrq.ph.w    %[n1], %[p1], %[scratch1]             \n\t" /* pixel 2 */
          "append         %[p1], %[scratch1], 16                \n\t" /* pixel 1 */
          "preceu.ph.qbl  %[scratch2], %[load3]                 \n\t"
          "preceu.ph.qbl  %[p2], %[load4]                       \n\t"
          "precrq.ph.w    %[n2], %[p2], %[scratch2]             \n\t" /* pixel 2 */
          "append         %[p2], %[scratch2], 16                \n\t" /* pixel 1 */

          "dpa.w.ph       $ac2, %[p1], %[vector1b]              \n\t"
          "dpa.w.ph       $ac2, %[p2], %[vector2b]              \n\t"
          "dpa.w.ph       $ac3, %[n1], %[vector1b]              \n\t"
          "dpa.w.ph       $ac3, %[n2], %[vector2b]              \n\t"

          "add            %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw            %[load1], 0(%[src_ptr])               \n\t"
          "add            %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw            %[load2], 0(%[src_ptr])               \n\t"
          "add            %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw            %[load3], 0(%[src_ptr])               \n\t"
          "add            %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw            %[load4], 0(%[src_ptr])               \n\t"

          "preceu.ph.qbr  %[scratch1], %[load1]                 \n\t"
          "preceu.ph.qbr  %[p1], %[load2]                       \n\t"
          "precrq.ph.w    %[n1], %[p1], %[scratch1]             \n\t" /* pixel 2 */
          "append         %[p1], %[scratch1], 16                \n\t" /* pixel 1 */
          "preceu.ph.qbr  %[scratch2], %[load3]                 \n\t"
          "preceu.ph.qbr  %[p2], %[load4]                       \n\t"
          "precrq.ph.w    %[n2], %[p2], %[scratch2]             \n\t" /* pixel 2 */
          "append         %[p2], %[scratch2], 16                \n\t" /* pixel 1 */

          "dpa.w.ph       $ac0, %[p1], %[vector3b]              \n\t"
          "dpa.w.ph       $ac0, %[p2], %[vector4b]              \n\t"
          "extp           %[Temp1], $ac0, 31                    \n\t"
          "dpa.w.ph       $ac1, %[n1], %[vector3b]              \n\t"
          "dpa.w.ph       $ac1, %[n2], %[vector4b]              \n\t"
          "extp           %[Temp2], $ac1, 31                    \n\t"

          "preceu.ph.qbl  %[scratch1], %[load1]                 \n\t"
          "preceu.ph.qbl  %[p1], %[load2]                       \n\t"
          "precrq.ph.w    %[n1], %[p1], %[scratch1]             \n\t" /* pixel 2 */
          "append         %[p1], %[scratch1], 16                \n\t" /* pixel 1 */
          "lbu            %[scratch1], 0(%[dst_ptr])            \n\t"
          "preceu.ph.qbl  %[scratch2], %[load3]                 \n\t"
          "preceu.ph.qbl  %[p2], %[load4]                       \n\t"
          "precrq.ph.w    %[n2], %[p2], %[scratch2]             \n\t" /* pixel 2 */
          "append         %[p2], %[scratch2], 16                \n\t" /* pixel 1 */
          "lbu            %[scratch2], 1(%[dst_ptr])            \n\t"

          "lbux           %[store1], %[Temp1](%[cm])            \n\t"
          "dpa.w.ph       $ac2, %[p1], %[vector3b]              \n\t"
          "dpa.w.ph       $ac2, %[p2], %[vector4b]              \n\t"
          "addqh_r.w      %[store1], %[store1], %[scratch1]     \n\t" /* pixel 1 */
          "extp           %[Temp1], $ac2, 31                    \n\t"

          "lbux           %[store2], %[Temp2](%[cm])            \n\t"
          "dpa.w.ph       $ac3, %[n1], %[vector3b]              \n\t"
          "dpa.w.ph       $ac3, %[n2], %[vector4b]              \n\t"
          "addqh_r.w      %[store2], %[store2], %[scratch2]     \n\t" /* pixel 2 */
          "extp           %[Temp2], $ac3, 31                    \n\t"
          "lbu            %[scratch1], 2(%[dst_ptr])            \n\t"

          "sb             %[store1], 0(%[dst_ptr])              \n\t"
          "sb             %[store2], 1(%[dst_ptr])              \n\t"
          "lbu            %[scratch2], 3(%[dst_ptr])            \n\t"

          "lbux           %[store1], %[Temp1](%[cm])            \n\t"
          "lbux           %[store2], %[Temp2](%[cm])            \n\t"
          "addqh_r.w      %[store1], %[store1], %[scratch1]     \n\t" /* pixel 3 */
          "addqh_r.w      %[store2], %[store2], %[scratch2]     \n\t" /* pixel 4 */

          "sb             %[store1], 2(%[dst_ptr])              \n\t"
          "sb             %[store2], 3(%[dst_ptr])              \n\t"

          : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
            [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),
            [n1] "=&r"(n1), [n2] "=&r"(n2), [scratch1] "=&r"(scratch1),
            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
          : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
            [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
            [vector4a] "r"(vector4a), [src_stride] "r"(src_stride),
            [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
    }

    /* Next row... */
    src += src_stride;
    dst += dst_stride;
  }
}

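/* Dispatcher for the averaging vertical convolution. When the first packed
   tap pair is zero the filter is effectively 2-tap, so the lighter
   vpx_convolve2_avg_vert_dspr2 path is taken; unsupported widths fall back
   to the C implementation. */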
void vpx_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                                  uint8_t *dst, ptrdiff_t dst_stride,
                                  const int16_t *filter_x, int x_step_q4,
                                  const int16_t *filter_y, int y_step_q4, int w,
                                  int h) {
  assert(y_step_q4 == 16);
  assert(((const int32_t *)filter_y)[1] != 0x800000);

  if (((const int32_t *)filter_y)[0] == 0) {
    vpx_convolve2_avg_vert_dspr2(src, src_stride, dst, dst_stride, filter_x,
                                 x_step_q4, filter_y, y_step_q4, w, h);
  } else {
    uint32_t pos = 38;

    /* bit position for extract from acc: pos = 38 makes each extp return
       accumulator bits [38:7], i.e. the filtered sum shifted right by the
       filter's 7-bit precision */
    __asm__ __volatile__("wrdsp %[pos], 1 \n\t"
                         :
                         : [pos] "r"(pos));

    prefetch_store(dst);

    switch (w) {
      case 4:
      case 8:
      case 16:
      case 32:
        convolve_avg_vert_4_dspr2(src, src_stride, dst, dst_stride, filter_y,
                                  w, h);
        break;
      case 64:
        prefetch_store(dst + 32);
        convolve_avg_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y,
                                   h);
        break;
      default:
        vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x,
                                 x_step_q4, filter_y, y_step_q4, w, h);
        break;
    }
  }
}

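/* Averaging 8-tap 2-D convolution as two passes: a horizontal pass into the
   fixed 64 x 135 intermediate buffer, then a vertical pass that averages the
   result into dst. */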
void vpx_convolve8_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const int16_t *filter_x, int x_step_q4,
                             const int16_t *filter_y, int y_step_q4, int w,
                             int h) {
  /* Fixed size intermediate buffer places limits on parameters. */
  DECLARE_ALIGNED(32, uint8_t, temp[64 * 135]);
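  /* The horizontal pass must produce 7 rows beyond h (3 above, 4 below) to
     supply the vertical 8-tap filter's support. */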
  int32_t intermediate_height = ((h * y_step_q4) >> 4) + 7;

  assert(w <= 64);
  assert(h <= 64);
  assert(x_step_q4 == 16);
  assert(y_step_q4 == 16);

  if (intermediate_height < h) intermediate_height = h;

  vpx_convolve8_horiz(src - (src_stride * 3), src_stride, temp, 64, filter_x,
                      x_step_q4, filter_y, y_step_q4, w, intermediate_height);

  vpx_convolve8_avg_vert(temp + 64 * 3, 64, dst, dst_stride, filter_x,
                         x_step_q4, filter_y, y_step_q4, w, h);
}

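/* Rounded average of src into dst with no filtering. The DSPR2 cases use
   adduh_r.qb, which computes the per-byte rounding average
   ((a + b + 1) >> 1) on four pixels at a time; the default case is the
   scalar equivalent. */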
void vpx_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                            uint8_t *dst, ptrdiff_t dst_stride,
                            const int16_t *filter_x, int filter_x_stride,
                            const int16_t *filter_y, int filter_y_stride, int w,
                            int h) {
  int x, y;
  uint32_t tp1, tp2, tn1, tp3, tp4, tn2;
  (void)filter_x;
  (void)filter_x_stride;
  (void)filter_y;
  (void)filter_y_stride;

  /* prefetch data to cache memory */
  prefetch_load(src);
  prefetch_load(src + 32);
  prefetch_store(dst);

  switch (w) {
    case 4:
      /* 1 word storage */
      for (y = h; y--;) {
        prefetch_load(src + src_stride);
        prefetch_load(src + src_stride + 32);
        prefetch_store(dst + dst_stride);

        __asm__ __volatile__(
            "ulw         %[tp1], 0(%[src])      \n\t"
            "ulw         %[tp2], 0(%[dst])      \n\t"
            "adduh_r.qb  %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "sw          %[tn1], 0(%[dst])      \n\t" /* store */

            : [tn1] "=&r"(tn1), [tp1] "=&r"(tp1), [tp2] "=&r"(tp2)
            : [src] "r"(src), [dst] "r"(dst));

        src += src_stride;
        dst += dst_stride;
      }
      break;
    case 8:
      /* 2 word storage */
      for (y = h; y--;) {
        prefetch_load(src + src_stride);
        prefetch_load(src + src_stride + 32);
        prefetch_store(dst + dst_stride);

        __asm__ __volatile__(
            "ulw         %[tp1], 0(%[src])      \n\t"
            "ulw         %[tp2], 0(%[dst])      \n\t"
            "ulw         %[tp3], 4(%[src])      \n\t"
            "ulw         %[tp4], 4(%[dst])      \n\t"
            "adduh_r.qb  %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "sw          %[tn1], 0(%[dst])      \n\t" /* store */
            "adduh_r.qb  %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw          %[tn2], 4(%[dst])      \n\t" /* store */

            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
              [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2)
            : [src] "r"(src), [dst] "r"(dst));

        src += src_stride;
        dst += dst_stride;
      }
      break;
    case 16:
      /* 4 word storage */
      for (y = h; y--;) {
        prefetch_load(src + src_stride);
        prefetch_load(src + src_stride + 32);
        prefetch_store(dst + dst_stride);

        __asm__ __volatile__(
            "ulw         %[tp1], 0(%[src])      \n\t"
            "ulw         %[tp2], 0(%[dst])      \n\t"
            "ulw         %[tp3], 4(%[src])      \n\t"
            "ulw         %[tp4], 4(%[dst])      \n\t"
            "adduh_r.qb  %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw         %[tp1], 8(%[src])      \n\t"
            "ulw         %[tp2], 8(%[dst])      \n\t"
            "sw          %[tn1], 0(%[dst])      \n\t" /* store */
            "adduh_r.qb  %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw          %[tn2], 4(%[dst])      \n\t" /* store */
            "ulw         %[tp3], 12(%[src])     \n\t"
            "ulw         %[tp4], 12(%[dst])     \n\t"
            "adduh_r.qb  %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "sw          %[tn1], 8(%[dst])      \n\t" /* store */
            "adduh_r.qb  %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw          %[tn2], 12(%[dst])     \n\t" /* store */

            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
              [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2)
            : [src] "r"(src), [dst] "r"(dst));

        src += src_stride;
        dst += dst_stride;
      }
      break;
    case 32:
      /* 8 word storage */
      for (y = h; y--;) {
        prefetch_load(src + src_stride);
        prefetch_load(src + src_stride + 32);
        prefetch_store(dst + dst_stride);

        __asm__ __volatile__(
            "ulw         %[tp1], 0(%[src])      \n\t"
            "ulw         %[tp2], 0(%[dst])      \n\t"
            "ulw         %[tp3], 4(%[src])      \n\t"
            "ulw         %[tp4], 4(%[dst])      \n\t"
            "adduh_r.qb  %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw         %[tp1], 8(%[src])      \n\t"
            "ulw         %[tp2], 8(%[dst])      \n\t"
            "sw          %[tn1], 0(%[dst])      \n\t" /* store */
            "adduh_r.qb  %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw          %[tn2], 4(%[dst])      \n\t" /* store */
            "ulw         %[tp3], 12(%[src])     \n\t"
            "ulw         %[tp4], 12(%[dst])     \n\t"
            "adduh_r.qb  %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw         %[tp1], 16(%[src])     \n\t"
            "ulw         %[tp2], 16(%[dst])     \n\t"
            "sw          %[tn1], 8(%[dst])      \n\t" /* store */
            "adduh_r.qb  %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw          %[tn2], 12(%[dst])     \n\t" /* store */
            "ulw         %[tp3], 20(%[src])     \n\t"
            "ulw         %[tp4], 20(%[dst])     \n\t"
            "adduh_r.qb  %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw         %[tp1], 24(%[src])     \n\t"
            "ulw         %[tp2], 24(%[dst])     \n\t"
            "sw          %[tn1], 16(%[dst])     \n\t" /* store */
            "adduh_r.qb  %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw          %[tn2], 20(%[dst])     \n\t" /* store */
            "ulw         %[tp3], 28(%[src])     \n\t"
            "ulw         %[tp4], 28(%[dst])     \n\t"
            "adduh_r.qb  %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "sw          %[tn1], 24(%[dst])     \n\t" /* store */
            "adduh_r.qb  %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw          %[tn2], 28(%[dst])     \n\t" /* store */

            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
              [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2)
            : [src] "r"(src), [dst] "r"(dst));

        src += src_stride;
        dst += dst_stride;
      }
      break;
    case 64:
      prefetch_load(src + 64);
      prefetch_store(dst + 32);

      /* 16 word storage */
      for (y = h; y--;) {
        prefetch_load(src + src_stride);
        prefetch_load(src + src_stride + 32);
        prefetch_load(src + src_stride + 64);
        prefetch_store(dst + dst_stride);
        prefetch_store(dst + dst_stride + 32);

        __asm__ __volatile__(
            "ulw         %[tp1], 0(%[src])      \n\t"
            "ulw         %[tp2], 0(%[dst])      \n\t"
            "ulw         %[tp3], 4(%[src])      \n\t"
            "ulw         %[tp4], 4(%[dst])      \n\t"
            "adduh_r.qb  %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw         %[tp1], 8(%[src])      \n\t"
            "ulw         %[tp2], 8(%[dst])      \n\t"
            "sw          %[tn1], 0(%[dst])      \n\t" /* store */
            "adduh_r.qb  %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw          %[tn2], 4(%[dst])      \n\t" /* store */
            "ulw         %[tp3], 12(%[src])     \n\t"
            "ulw         %[tp4], 12(%[dst])     \n\t"
            "adduh_r.qb  %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw         %[tp1], 16(%[src])     \n\t"
            "ulw         %[tp2], 16(%[dst])     \n\t"
            "sw          %[tn1], 8(%[dst])      \n\t" /* store */
            "adduh_r.qb  %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw          %[tn2], 12(%[dst])     \n\t" /* store */
            "ulw         %[tp3], 20(%[src])     \n\t"
            "ulw         %[tp4], 20(%[dst])     \n\t"
            "adduh_r.qb  %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw         %[tp1], 24(%[src])     \n\t"
            "ulw         %[tp2], 24(%[dst])     \n\t"
            "sw          %[tn1], 16(%[dst])     \n\t" /* store */
            "adduh_r.qb  %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw          %[tn2], 20(%[dst])     \n\t" /* store */
            "ulw         %[tp3], 28(%[src])     \n\t"
            "ulw         %[tp4], 28(%[dst])     \n\t"
            "adduh_r.qb  %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw         %[tp1], 32(%[src])     \n\t"
            "ulw         %[tp2], 32(%[dst])     \n\t"
            "sw          %[tn1], 24(%[dst])     \n\t" /* store */
            "adduh_r.qb  %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw          %[tn2], 28(%[dst])     \n\t" /* store */
            "ulw         %[tp3], 36(%[src])     \n\t"
            "ulw         %[tp4], 36(%[dst])     \n\t"
            "adduh_r.qb  %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw         %[tp1], 40(%[src])     \n\t"
            "ulw         %[tp2], 40(%[dst])     \n\t"
            "sw          %[tn1], 32(%[dst])     \n\t" /* store */
            "adduh_r.qb  %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw          %[tn2], 36(%[dst])     \n\t" /* store */
            "ulw         %[tp3], 44(%[src])     \n\t"
            "ulw         %[tp4], 44(%[dst])     \n\t"
            "adduh_r.qb  %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw         %[tp1], 48(%[src])     \n\t"
            "ulw         %[tp2], 48(%[dst])     \n\t"
            "sw          %[tn1], 40(%[dst])     \n\t" /* store */
            "adduh_r.qb  %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw          %[tn2], 44(%[dst])     \n\t" /* store */
            "ulw         %[tp3], 52(%[src])     \n\t"
            "ulw         %[tp4], 52(%[dst])     \n\t"
            "adduh_r.qb  %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw         %[tp1], 56(%[src])     \n\t"
            "ulw         %[tp2], 56(%[dst])     \n\t"
            "sw          %[tn1], 48(%[dst])     \n\t" /* store */
            "adduh_r.qb  %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw          %[tn2], 52(%[dst])     \n\t" /* store */
            "ulw         %[tp3], 60(%[src])     \n\t"
            "ulw         %[tp4], 60(%[dst])     \n\t"
            "adduh_r.qb  %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "sw          %[tn1], 56(%[dst])     \n\t" /* store */
            "adduh_r.qb  %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw          %[tn2], 60(%[dst])     \n\t" /* store */

            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
              [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2)
            : [src] "r"(src), [dst] "r"(dst));

        src += src_stride;
        dst += dst_stride;
      }
      break;
    default:
      for (y = h; y > 0; --y) {
        for (x = 0; x < w; ++x) {
          dst[x] = (dst[x] + src[x] + 1) >> 1;
        }

        src += src_stride;
        dst += dst_stride;
      }
      break;
  }
}
#endif
