/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE.   *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE Theora SOURCE CODE IS COPYRIGHT (C) 2002-2009                *
 * by the Xiph.Org Foundation and contributors http://www.xiph.org/ *
 *                                                                  *
 ********************************************************************

  function:
    last mod: $Id: mmxfrag.c 16503 2009-08-22 18:14:02Z giles $

 ********************************************************************/

/*MMX acceleration of fragment reconstruction for motion compensation.
  Originally written by Rudolf Marek.
  Additional optimization by Nils Pipenbrinck.
  Note: Loops are unrolled for best performance.
  The iteration each instruction belongs to is marked in the comments as #i.*/
#include <stddef.h>
#include "x86int.h"
#include "mmxfrag.h"

#if defined(OC_X86_ASM)

/*Copies an 8x8 block of pixels from _src to _dst, assuming _ystride bytes
  between rows.*/
void oc_frag_copy_mmx(unsigned char *_dst,
 const unsigned char *_src,int _ystride){
  OC_FRAG_COPY_MMX(_dst,_src,_ystride);
}
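
/*A rough plain-C sketch of what the copy above does, for reference only:
   eight 8-byte rows, each _ystride bytes apart. The function name below is
   illustrative and not part of the library.*/
#if 0
static void oc_frag_copy_ref(unsigned char *_dst,
 const unsigned char *_src,int _ystride){
  int i;
  int j;
  /*Copy one 8x8 block of bytes row by row.*/
  for(i=0;i<8;i++)for(j=0;j<8;j++)_dst[i*_ystride+j]=_src[i*_ystride+j];
}
#endif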

void oc_frag_recon_intra_mmx(unsigned char *_dst,int _ystride,
 const ogg_int16_t *_residue){
  __asm__ __volatile__(
    /*Set mm0 to 0xFFFFFFFFFFFFFFFF.*/
    "pcmpeqw %%mm0,%%mm0\n\t"
    /*#0 Load low residue.*/
    "movq 0*8(%[residue]),%%mm1\n\t"
    /*#0 Load high residue.*/
    "movq 1*8(%[residue]),%%mm2\n\t"
    /*Set mm0 to 0x8000800080008000.*/
    "psllw $15,%%mm0\n\t"
    /*#1 Load low residue.*/
    "movq 2*8(%[residue]),%%mm3\n\t"
    /*#1 Load high residue.*/
    "movq 3*8(%[residue]),%%mm4\n\t"
    /*Set mm0 to 0x0080008000800080, the +128 bias added to every intra pixel.*/
    "psrlw $8,%%mm0\n\t"
    /*#2 Load low residue.*/
    "movq 4*8(%[residue]),%%mm5\n\t"
    /*#2 Load high residue.*/
    "movq 5*8(%[residue]),%%mm6\n\t"
    /*#0 Bias low residue.*/
    "paddsw %%mm0,%%mm1\n\t"
    /*#0 Bias high residue.*/
    "paddsw %%mm0,%%mm2\n\t"
    /*#0 Pack to byte.*/
    "packuswb %%mm2,%%mm1\n\t"
    /*#1 Bias low residue.*/
    "paddsw %%mm0,%%mm3\n\t"
    /*#1 Bias high residue.*/
    "paddsw %%mm0,%%mm4\n\t"
    /*#1 Pack to byte.*/
    "packuswb %%mm4,%%mm3\n\t"
    /*#2 Bias low residue.*/
    "paddsw %%mm0,%%mm5\n\t"
    /*#2 Bias high residue.*/
    "paddsw %%mm0,%%mm6\n\t"
    /*#2 Pack to byte.*/
    "packuswb %%mm6,%%mm5\n\t"
    /*#0 Write row.*/
    "movq %%mm1,(%[dst])\n\t"
    /*#1 Write row.*/
    "movq %%mm3,(%[dst],%[ystride])\n\t"
    /*#2 Write row.*/
    "movq %%mm5,(%[dst],%[ystride],2)\n\t"
    /*#3 Load low residue.*/
    "movq 6*8(%[residue]),%%mm1\n\t"
    /*#3 Load high residue.*/
    "movq 7*8(%[residue]),%%mm2\n\t"
    /*#4 Load low residue.*/
    "movq 8*8(%[residue]),%%mm3\n\t"
    /*#4 Load high residue.*/
    "movq 9*8(%[residue]),%%mm4\n\t"
    /*#5 Load low residue.*/
    "movq 10*8(%[residue]),%%mm5\n\t"
    /*#5 Load high residue.*/
    "movq 11*8(%[residue]),%%mm6\n\t"
    /*#3 Bias low residue.*/
    "paddsw %%mm0,%%mm1\n\t"
    /*#3 Bias high residue.*/
    "paddsw %%mm0,%%mm2\n\t"
    /*#3 Pack to byte.*/
    "packuswb %%mm2,%%mm1\n\t"
    /*#4 Bias low residue.*/
    "paddsw %%mm0,%%mm3\n\t"
    /*#4 Bias high residue.*/
    "paddsw %%mm0,%%mm4\n\t"
    /*#4 Pack to byte.*/
    "packuswb %%mm4,%%mm3\n\t"
    /*#5 Bias low residue.*/
    "paddsw %%mm0,%%mm5\n\t"
    /*#5 Bias high residue.*/
    "paddsw %%mm0,%%mm6\n\t"
    /*#5 Pack to byte.*/
    "packuswb %%mm6,%%mm5\n\t"
    /*#3 Write row.*/
    "movq %%mm1,(%[dst],%[ystride3])\n\t"
    /*#4 Write row.*/
    "movq %%mm3,(%[dst4])\n\t"
    /*#5 Write row.*/
    "movq %%mm5,(%[dst4],%[ystride])\n\t"
    /*#6 Load low residue.*/
    "movq 12*8(%[residue]),%%mm1\n\t"
    /*#6 Load high residue.*/
    "movq 13*8(%[residue]),%%mm2\n\t"
    /*#7 Load low residue.*/
    "movq 14*8(%[residue]),%%mm3\n\t"
    /*#7 Load high residue.*/
    "movq 15*8(%[residue]),%%mm4\n\t"
    /*#6 Bias low residue.*/
    "paddsw %%mm0,%%mm1\n\t"
    /*#6 Bias high residue.*/
    "paddsw %%mm0,%%mm2\n\t"
    /*#6 Pack to byte.*/
    "packuswb %%mm2,%%mm1\n\t"
    /*#7 Bias low residue.*/
    "paddsw %%mm0,%%mm3\n\t"
    /*#7 Bias high residue.*/
    "paddsw %%mm0,%%mm4\n\t"
    /*#7 Pack to byte.*/
    "packuswb %%mm4,%%mm3\n\t"
    /*#6 Write row.*/
    "movq %%mm1,(%[dst4],%[ystride],2)\n\t"
    /*#7 Write row.*/
    "movq %%mm3,(%[dst4],%[ystride3])\n\t"
    :
    :[residue]"r"(_residue),
     [dst]"r"(_dst),
     [dst4]"r"(_dst+(_ystride<<2)),
     [ystride]"r"((ptrdiff_t)_ystride),
     [ystride3]"r"((ptrdiff_t)_ystride*3)
    :"memory"
  );
}
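
/*A rough plain-C sketch of the intra reconstruction above, for reference
   only: each output byte is its residue value plus the +128 intra bias,
   clamped to [0,255] (which is what the saturating pack performs). The
   function name below is illustrative and not part of the library.*/
#if 0
static void oc_frag_recon_intra_ref(unsigned char *_dst,int _ystride,
 const ogg_int16_t *_residue){
  int i;
  int j;
  for(i=0;i<8;i++)for(j=0;j<8;j++){
    int p;
    p=_residue[i*8+j]+128;
    /*Clamp to the unsigned byte range before storing.*/
    _dst[i*_ystride+j]=(unsigned char)(p<0?0:p>255?255:p);
  }
}
#endif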

void oc_frag_recon_inter_mmx(unsigned char *_dst,const unsigned char *_src,
 int _ystride,const ogg_int16_t *_residue){
  int i;
  /*Zero mm0.*/
  __asm__ __volatile__("pxor %%mm0,%%mm0\n\t"::);
  for(i=4;i-->0;){
    __asm__ __volatile__(
      /*#0 Load source.*/
      "movq (%[src]),%%mm3\n\t"
      /*#1 Load source.*/
      "movq (%[src],%[ystride]),%%mm7\n\t"
      /*#0 Get copy of src.*/
      "movq %%mm3,%%mm4\n\t"
      /*#0 Expand high source.*/
      "punpckhbw %%mm0,%%mm4\n\t"
      /*#0 Expand low source.*/
      "punpcklbw %%mm0,%%mm3\n\t"
      /*#0 Add residue high.*/
      "paddsw 8(%[residue]),%%mm4\n\t"
      /*#1 Get copy of src.*/
      "movq %%mm7,%%mm2\n\t"
      /*#0 Add residue low.*/
      "paddsw (%[residue]),%%mm3\n\t"
      /*#1 Expand high source.*/
      "punpckhbw %%mm0,%%mm2\n\t"
      /*#0 Pack final row pixels.*/
      "packuswb %%mm4,%%mm3\n\t"
      /*#1 Expand low source.*/
      "punpcklbw %%mm0,%%mm7\n\t"
      /*#1 Add residue low.*/
      "paddsw 16(%[residue]),%%mm7\n\t"
      /*#1 Add residue high.*/
      "paddsw 24(%[residue]),%%mm2\n\t"
      /*Advance residue.*/
      "lea 32(%[residue]),%[residue]\n\t"
      /*#1 Pack final row pixels.*/
      "packuswb %%mm2,%%mm7\n\t"
      /*Advance src.*/
      "lea (%[src],%[ystride],2),%[src]\n\t"
      /*#0 Write row.*/
      "movq %%mm3,(%[dst])\n\t"
      /*#1 Write row.*/
      "movq %%mm7,(%[dst],%[ystride])\n\t"
      /*Advance dst.*/
      "lea (%[dst],%[ystride],2),%[dst]\n\t"
      :[residue]"+r"(_residue),[dst]"+r"(_dst),[src]"+r"(_src)
      :[ystride]"r"((ptrdiff_t)_ystride)
      :"memory"
    );
  }
}
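
/*A rough plain-C sketch of the inter reconstruction above, for reference
   only: each output byte is the predictor byte plus its residue, clamped to
   [0,255]. The function name below is illustrative and not part of the
   library.*/
#if 0
static void oc_frag_recon_inter_ref(unsigned char *_dst,
 const unsigned char *_src,int _ystride,const ogg_int16_t *_residue){
  int i;
  int j;
  for(i=0;i<8;i++)for(j=0;j<8;j++){
    int p;
    p=_src[i*_ystride+j]+_residue[i*8+j];
    /*Clamp to the unsigned byte range before storing.*/
    _dst[i*_ystride+j]=(unsigned char)(p<0?0:p>255?255:p);
  }
}
#endif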

void oc_frag_recon_inter2_mmx(unsigned char *_dst,const unsigned char *_src1,
 const unsigned char *_src2,int _ystride,const ogg_int16_t *_residue){
  int i;
  /*Zero mm7.*/
  __asm__ __volatile__("pxor %%mm7,%%mm7\n\t"::);
  for(i=4;i-->0;){
    __asm__ __volatile__(
      /*#0 Load src1.*/
      "movq (%[src1]),%%mm0\n\t"
      /*#0 Load src2.*/
      "movq (%[src2]),%%mm2\n\t"
      /*#0 Copy src1.*/
      "movq %%mm0,%%mm1\n\t"
      /*#0 Copy src2.*/
      "movq %%mm2,%%mm3\n\t"
      /*#1 Load src1.*/
      "movq (%[src1],%[ystride]),%%mm4\n\t"
      /*#0 Unpack lower src1.*/
      "punpcklbw %%mm7,%%mm0\n\t"
      /*#1 Load src2.*/
      "movq (%[src2],%[ystride]),%%mm5\n\t"
      /*#0 Unpack higher src1.*/
      "punpckhbw %%mm7,%%mm1\n\t"
      /*#0 Unpack lower src2.*/
      "punpcklbw %%mm7,%%mm2\n\t"
      /*#0 Unpack higher src2.*/
      "punpckhbw %%mm7,%%mm3\n\t"
      /*Advance src1 ptr.*/
      "lea (%[src1],%[ystride],2),%[src1]\n\t"
      /*Advance src2 ptr.*/
      "lea (%[src2],%[ystride],2),%[src2]\n\t"
      /*#0 Lower src1+src2.*/
      "paddsw %%mm2,%%mm0\n\t"
      /*#0 Higher src1+src2.*/
      "paddsw %%mm3,%%mm1\n\t"
      /*#1 Copy src1.*/
      "movq %%mm4,%%mm2\n\t"
      /*#0 Build lo average.*/
      "psraw $1,%%mm0\n\t"
      /*#1 Copy src2.*/
      "movq %%mm5,%%mm3\n\t"
      /*#1 Unpack lower src1.*/
      "punpcklbw %%mm7,%%mm4\n\t"
      /*#0 Build hi average.*/
      "psraw $1,%%mm1\n\t"
      /*#1 Unpack higher src1.*/
      "punpckhbw %%mm7,%%mm2\n\t"
      /*#0 low+=residue.*/
      "paddsw (%[residue]),%%mm0\n\t"
      /*#1 Unpack lower src2.*/
      "punpcklbw %%mm7,%%mm5\n\t"
      /*#0 high+=residue.*/
      "paddsw 8(%[residue]),%%mm1\n\t"
      /*#1 Unpack higher src2.*/
      "punpckhbw %%mm7,%%mm3\n\t"
      /*#1 Lower src1+src2.*/
      "paddsw %%mm4,%%mm5\n\t"
      /*#0 Pack and saturate.*/
      "packuswb %%mm1,%%mm0\n\t"
      /*#1 Higher src1+src2.*/
      "paddsw %%mm2,%%mm3\n\t"
      /*#0 Write row.*/
      "movq %%mm0,(%[dst])\n\t"
      /*#1 Build lo average.*/
      "psraw $1,%%mm5\n\t"
      /*#1 Build hi average.*/
      "psraw $1,%%mm3\n\t"
      /*#1 low+=residue.*/
      "paddsw 16(%[residue]),%%mm5\n\t"
      /*#1 high+=residue.*/
      "paddsw 24(%[residue]),%%mm3\n\t"
      /*#1 Pack and saturate.*/
      "packuswb %%mm3,%%mm5\n\t"
      /*#1 Write row.*/
      "movq %%mm5,(%[dst],%[ystride])\n\t"
      /*Advance residue ptr.*/
      "add $32,%[residue]\n\t"
      /*Advance dest ptr.*/
      "lea (%[dst],%[ystride],2),%[dst]\n\t"
      :[dst]"+r"(_dst),[residue]"+r"(_residue),
       [src1]"+%r"(_src1),[src2]"+r"(_src2)
      :[ystride]"r"((ptrdiff_t)_ystride)
      :"memory"
    );
  }
}
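
/*A rough plain-C sketch of the bi-predicted reconstruction above, for
   reference only: the two predictors are averaged with a truncating shift
   (matching the psraw on their sum), the residue is added, and the result
   is clamped to [0,255]. The function name below is illustrative and not
   part of the library.*/
#if 0
static void oc_frag_recon_inter2_ref(unsigned char *_dst,
 const unsigned char *_src1,const unsigned char *_src2,int _ystride,
 const ogg_int16_t *_residue){
  int i;
  int j;
  for(i=0;i<8;i++)for(j=0;j<8;j++){
    int p;
    p=((_src1[i*_ystride+j]+_src2[i*_ystride+j])>>1)+_residue[i*8+j];
    /*Clamp to the unsigned byte range before storing.*/
    _dst[i*_ystride+j]=(unsigned char)(p<0?0:p>255?255:p);
  }
}
#endif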

void oc_restore_fpu_mmx(void){
  __asm__ __volatile__("emms\n\t");
}
#endif