/*****************************************************************************
 * dct.c: ppc transform and zigzag
 *****************************************************************************
 * Copyright (C) 2003-2021 x264 project
 *
 * Authors: Guillaume Poirier <gpoirier@mplayerhq.hu>
 *          Eric Petit <eric.petit@lapsus.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common/common.h"
#include "ppccommon.h"
#include "dct.h"

#if !HIGH_BIT_DEPTH
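
/* One 1-D pass of the 4x4 forward transform, applied to four vectors at
 * once.  Per lane this is the usual butterfly:
 *   b0 = a0 + a1 + a2 + a3
 *   b1 = 2*(a0-a3) + (a1-a2)
 *   b2 = (a0+a3) - (a1+a2)
 *   b3 = (a0-a3) - 2*(a1-a2)
 */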
#define VEC_DCT(a0,a1,a2,a3,b0,b1,b2,b3) \
    b1 = vec_add( a0, a3 );              \
    b3 = vec_add( a1, a2 );              \
    b0 = vec_add( b1, b3 );              \
    b2 = vec_sub( b1, b3 );              \
    a0 = vec_sub( a0, a3 );              \
    a1 = vec_sub( a1, a2 );              \
    b1 = vec_add( a0, a0 );              \
    b1 = vec_add( b1, a1 );              \
    b3 = vec_sub( a0, a1 );              \
    b3 = vec_sub( b3, a1 )

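/* 4x4 forward transform of the residual pix1 - pix2: one VEC_DCT pass over
 * the four difference vectors, a transpose, a second pass, then the 16
 * coefficients are stored two rows per 16-byte vector. */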
void x264_sub4x4_dct_altivec( int16_t dct[16], uint8_t *pix1, uint8_t *pix2 )
{
    PREP_DIFF_8BYTEALIGNED;
    vec_s16_t dct0v, dct1v, dct2v, dct3v;
    vec_s16_t tmp0v, tmp1v, tmp2v, tmp3v;

    vec_u8_t permHighv;

    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct0v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct1v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct2v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct3v );
    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_TRANSPOSE_4( tmp0v, tmp1v, tmp2v, tmp3v,
                     dct0v, dct1v, dct2v, dct3v );
    permHighv = (vec_u8_t) CV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17);
    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );

    vec_st(vec_perm(tmp0v, tmp1v, permHighv),  0, dct);
    vec_st(vec_perm(tmp2v, tmp3v, permHighv), 16, dct);
}

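/* 8x8 forward transform as four 4x4 transforms computed in parallel: each
 * difference vector carries a full 8-pixel row, so the two VEC_DCT passes
 * (with a transpose in between) transform all four 4x4 sub-blocks at once;
 * the final permutes de-interleave the coefficients into dct[0..3]. */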
void x264_sub8x8_dct_altivec( int16_t dct[4][16], uint8_t *pix1, uint8_t *pix2 )
{
    PREP_DIFF_8BYTEALIGNED;
    vec_s16_t dct0v, dct1v, dct2v, dct3v, dct4v, dct5v, dct6v, dct7v;
    vec_s16_t tmp0v, tmp1v, tmp2v, tmp3v, tmp4v, tmp5v, tmp6v, tmp7v;

    vec_u8_t permHighv, permLowv;

    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct0v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct1v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct2v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct3v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct4v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct5v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct6v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct7v );
    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_DCT( dct4v, dct5v, dct6v, dct7v, tmp4v, tmp5v, tmp6v, tmp7v );
    VEC_TRANSPOSE_8( tmp0v, tmp1v, tmp2v, tmp3v,
                     tmp4v, tmp5v, tmp6v, tmp7v,
                     dct0v, dct1v, dct2v, dct3v,
                     dct4v, dct5v, dct6v, dct7v );

    permHighv = (vec_u8_t) CV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17);
    permLowv  = (vec_u8_t) CV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_DCT( dct4v, dct5v, dct6v, dct7v, tmp4v, tmp5v, tmp6v, tmp7v );

    vec_st(vec_perm(tmp0v, tmp1v, permHighv),   0, *dct);
    vec_st(vec_perm(tmp2v, tmp3v, permHighv),  16, *dct);
    vec_st(vec_perm(tmp4v, tmp5v, permHighv),  32, *dct);
    vec_st(vec_perm(tmp6v, tmp7v, permHighv),  48, *dct);
    vec_st(vec_perm(tmp0v, tmp1v, permLowv),   64, *dct);
    vec_st(vec_perm(tmp2v, tmp3v, permLowv),   80, *dct);
    vec_st(vec_perm(tmp4v, tmp5v, permLowv),   96, *dct);
    vec_st(vec_perm(tmp6v, tmp7v, permLowv),  112, *dct);
}

void x264_sub16x16_dct_altivec( int16_t dct[16][16], uint8_t *pix1, uint8_t *pix2 )
{
    x264_sub8x8_dct_altivec( &dct[ 0], &pix1[0],               &pix2[0] );
    x264_sub8x8_dct_altivec( &dct[ 4], &pix1[8],               &pix2[8] );
    x264_sub8x8_dct_altivec( &dct[ 8], &pix1[8*FENC_STRIDE+0], &pix2[8*FDEC_STRIDE+0] );
    x264_sub8x8_dct_altivec( &dct[12], &pix1[8*FENC_STRIDE+8], &pix2[8*FDEC_STRIDE+8] );
}

/***************************************************************************
 * 8x8 transform:
 ***************************************************************************/

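/* Sum pix1 - pix2 over four consecutive rows (eight columns wide) into
 * diff[i]; sub8x8_dct_dc reduces these per-column sums to the four 4x4 DC
 * values. */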
static void pix_diff( uint8_t *p1, uint8_t *p2, vec_s16_t *diff, int i )
{
    vec_s16_t pix1v, pix2v, tmp[4];
    vec_u8_t pix1v8, pix2v8;
    LOAD_ZERO;

    for( int j = 0; j < 4; j++ )
    {
        pix1v8 = vec_vsx_ld( 0, p1 );
        pix2v8 = vec_vsx_ld( 0, p2 );
        pix1v = vec_u8_to_s16_h( pix1v8 );
        pix2v = vec_u8_to_s16_h( pix2v8 );
        tmp[j] = vec_sub( pix1v, pix2v );
        p1 += FENC_STRIDE;
        p2 += FDEC_STRIDE;
    }
    diff[i] = vec_add( tmp[0], tmp[1] );
    diff[i] = vec_add( diff[i], tmp[2] );
    diff[i] = vec_add( diff[i], tmp[3] );
}

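/* DC-only forward transform of an 8x8 residual: compute the DC (sum of
 * differences) of each 4x4 sub-block, apply a 2x2 Hadamard to the four DCs
 * (the lane comments below track the combinations), and write the four
 * results to dct[0..3] while leaving the upper half of the destination
 * vector untouched. */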
void x264_sub8x8_dct_dc_altivec( int16_t dct[4], uint8_t *pix1, uint8_t *pix2 )
{
    vec_s16_t diff[2], tmp;
    vec_s32_t sum[2];
    vec_s32_t zero32 = vec_splat_s32(0);
    vec_u8_t mask = { 0x00, 0x01, 0x00, 0x01, 0x04, 0x05, 0x04, 0x05,
                      0x02, 0x03, 0x02, 0x03, 0x06, 0x07, 0x06, 0x07 };

    pix_diff( &pix1[0], &pix2[0], diff, 0 );
    pix_diff( &pix1[4*FENC_STRIDE], &pix2[4*FDEC_STRIDE], diff, 1 );

    sum[0] = vec_sum4s( diff[0], zero32 );
    sum[1] = vec_sum4s( diff[1], zero32 );
    diff[0] = vec_packs( sum[0], sum[1] );
    sum[0] = vec_sum4s( diff[0], zero32 );
    diff[0] = vec_packs( sum[0], zero32 );

    diff[0] = vec_perm( diff[0], diff[0], mask ); // 0 0 2 2 1 1 3 3
    tmp = xxpermdi( diff[0], diff[0], 2 );        // 1 1 3 3 0 0 2 2
    diff[1] = vec_add( diff[0], tmp );            // 0+1 0+1 2+3 2+3
    diff[0] = vec_sub( diff[0], tmp );            // 0-1 0-1 2-3 2-3
    tmp = vec_mergeh( diff[1], diff[0] );         // 0+1 0-1 0+1 0-1 2+3 2-3 2+3 2-3
    diff[0] = xxpermdi( tmp, tmp, 2 );            // 2+3 2-3 2+3 2-3
    diff[1] = vec_add( tmp, diff[0] );            // 0+1+2+3 0-1+2+3
    diff[0] = vec_sub( tmp, diff[0] );            // 0+1-2-3 0-1-2+3
    diff[0] = vec_mergeh( diff[1], diff[0] );

    diff[1] = vec_ld( 0, dct );
    diff[0] = xxpermdi( diff[0], diff[1], 0 );
    vec_st( diff[0], 0, dct );
}

/* DCT8_1D unrolled by 8 in Altivec */
#define DCT8_1D_ALTIVEC( dct0v, dct1v, dct2v, dct3v, dct4v, dct5v, dct6v, dct7v ) \
{ \
    /* int s07 = SRC(0) + SRC(7); */ \
    vec_s16_t s07v = vec_add( dct0v, dct7v); \
    /* int s16 = SRC(1) + SRC(6); */ \
    vec_s16_t s16v = vec_add( dct1v, dct6v); \
    /* int s25 = SRC(2) + SRC(5); */ \
    vec_s16_t s25v = vec_add( dct2v, dct5v); \
    /* int s34 = SRC(3) + SRC(4); */ \
    vec_s16_t s34v = vec_add( dct3v, dct4v); \
\
    /* int a0 = s07 + s34; */ \
    vec_s16_t a0v = vec_add(s07v, s34v); \
    /* int a1 = s16 + s25; */ \
    vec_s16_t a1v = vec_add(s16v, s25v); \
    /* int a2 = s07 - s34; */ \
    vec_s16_t a2v = vec_sub(s07v, s34v); \
    /* int a3 = s16 - s25; */ \
    vec_s16_t a3v = vec_sub(s16v, s25v); \
\
    /* int d07 = SRC(0) - SRC(7); */ \
    vec_s16_t d07v = vec_sub( dct0v, dct7v); \
    /* int d16 = SRC(1) - SRC(6); */ \
    vec_s16_t d16v = vec_sub( dct1v, dct6v); \
    /* int d25 = SRC(2) - SRC(5); */ \
    vec_s16_t d25v = vec_sub( dct2v, dct5v); \
    /* int d34 = SRC(3) - SRC(4); */ \
    vec_s16_t d34v = vec_sub( dct3v, dct4v); \
\
    /* int a4 = d16 + d25 + (d07 + (d07>>1)); */ \
    vec_s16_t a4v = vec_add( vec_add(d16v, d25v), vec_add(d07v, vec_sra(d07v, onev)) ); \
    /* int a5 = d07 - d34 - (d25 + (d25>>1)); */ \
    vec_s16_t a5v = vec_sub( vec_sub(d07v, d34v), vec_add(d25v, vec_sra(d25v, onev)) ); \
    /* int a6 = d07 + d34 - (d16 + (d16>>1)); */ \
    vec_s16_t a6v = vec_sub( vec_add(d07v, d34v), vec_add(d16v, vec_sra(d16v, onev)) ); \
    /* int a7 = d16 - d25 + (d34 + (d34>>1)); */ \
    vec_s16_t a7v = vec_add( vec_sub(d16v, d25v), vec_add(d34v, vec_sra(d34v, onev)) ); \
\
    /* DST(0) = a0 + a1; */ \
    dct0v = vec_add( a0v, a1v ); \
    /* DST(1) = a4 + (a7>>2); */ \
    dct1v = vec_add( a4v, vec_sra(a7v, twov) ); \
    /* DST(2) = a2 + (a3>>1); */ \
    dct2v = vec_add( a2v, vec_sra(a3v, onev) ); \
    /* DST(3) = a5 + (a6>>2); */ \
    dct3v = vec_add( a5v, vec_sra(a6v, twov) ); \
    /* DST(4) = a0 - a1; */ \
    dct4v = vec_sub( a0v, a1v ); \
    /* DST(5) = a6 - (a5>>2); */ \
    dct5v = vec_sub( a6v, vec_sra(a5v, twov) ); \
    /* DST(6) = (a2>>1) - a3; */ \
    dct6v = vec_sub( vec_sra(a2v, onev), a3v ); \
    /* DST(7) = (a4>>2) - a7; */ \
    dct7v = vec_sub( vec_sra(a4v, twov), a7v ); \
}


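/* 8x8 forward transform of the residual pix1 - pix2: one DCT8_1D pass over
 * the eight difference vectors, a transpose, then a second pass gives the
 * full 2-D transform; the result is stored row by row into dct[64]. */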
void x264_sub8x8_dct8_altivec( int16_t dct[64], uint8_t *pix1, uint8_t *pix2 )
{
    vec_u16_t onev = vec_splat_u16(1);
    vec_u16_t twov = vec_add( onev, onev );

    PREP_DIFF_8BYTEALIGNED;

    vec_s16_t dct0v, dct1v, dct2v, dct3v,
              dct4v, dct5v, dct6v, dct7v;

    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct0v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct1v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct2v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct3v );

    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct4v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct5v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct6v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct7v );

    DCT8_1D_ALTIVEC( dct0v, dct1v, dct2v, dct3v,
                     dct4v, dct5v, dct6v, dct7v );

    vec_s16_t dct_tr0v, dct_tr1v, dct_tr2v, dct_tr3v,
              dct_tr4v, dct_tr5v, dct_tr6v, dct_tr7v;

    VEC_TRANSPOSE_8( dct0v, dct1v, dct2v, dct3v,
                     dct4v, dct5v, dct6v, dct7v,
                     dct_tr0v, dct_tr1v, dct_tr2v, dct_tr3v,
                     dct_tr4v, dct_tr5v, dct_tr6v, dct_tr7v );

    DCT8_1D_ALTIVEC( dct_tr0v, dct_tr1v, dct_tr2v, dct_tr3v,
                     dct_tr4v, dct_tr5v, dct_tr6v, dct_tr7v );

    vec_st( dct_tr0v,   0, dct );
    vec_st( dct_tr1v,  16, dct );
    vec_st( dct_tr2v,  32, dct );
    vec_st( dct_tr3v,  48, dct );

    vec_st( dct_tr4v,  64, dct );
    vec_st( dct_tr5v,  80, dct );
    vec_st( dct_tr6v,  96, dct );
    vec_st( dct_tr7v, 112, dct );
}

void x264_sub16x16_dct8_altivec( int16_t dct[4][64], uint8_t *pix1, uint8_t *pix2 )
{
    x264_sub8x8_dct8_altivec( dct[0], &pix1[0],               &pix2[0] );
    x264_sub8x8_dct8_altivec( dct[1], &pix1[8],               &pix2[8] );
    x264_sub8x8_dct8_altivec( dct[2], &pix1[8*FENC_STRIDE+0], &pix2[8*FDEC_STRIDE+0] );
    x264_sub8x8_dct8_altivec( dct[3], &pix1[8*FENC_STRIDE+8], &pix2[8*FDEC_STRIDE+8] );
}


/****************************************************************************
 * IDCT transform:
 ****************************************************************************/

#define ALTIVEC_STORE8_DC_SUM_CLIP(dest, dcv)                         \
{                                                                     \
    /* unaligned load */                                              \
    vec_u8_t dstv = vec_vsx_ld( 0, dest );                            \
    vec_s16_t dcvsum = vec_adds( dcv, vec_u8_to_s16_h( dstv ) );      \
    vec_u8_t dcvsum8 = vec_packsu( dcvsum, vec_u8_to_s16_l( dstv ) ); \
    /* unaligned store */                                             \
    vec_vsx_st( dcvsum8, 0, dest );                                   \
}

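/* DC-only inverse transform for an 8x8 block: each 4x4 sub-block DC is
 * scaled as (dc+32)>>6, broadcast over its 4x4 area, added to the
 * prediction with saturation and packed back to 8 bits.  dcv0 holds the
 * DCs of the two top sub-blocks, dcv1 those of the two bottom ones. */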
void x264_add8x8_idct_dc_altivec( uint8_t *p_dst, int16_t dct[4] )
{
    vec_s16_t dcv0, dcv1;
    vec_s16_t v32 = vec_sl( vec_splat_s16( 8 ), vec_splat_u16( 2 ) );
    vec_u16_t v6 = vec_splat_u16( 6 );
    vec_s16_t dctv = vec_ld( 0, dct );
    vec_u8_t dstv0, dstv1, dstv2, dstv3, dstv4, dstv5, dstv6, dstv7;
    vec_s16_t dcvsum0, dcvsum1, dcvsum2, dcvsum3, dcvsum4, dcvsum5, dcvsum6, dcvsum7;
    vec_u8_t dcvsum8_0, dcvsum8_1, dcvsum8_2, dcvsum8_3, dcvsum8_4, dcvsum8_5, dcvsum8_6, dcvsum8_7;
    LOAD_ZERO;

    dctv = vec_sra( vec_add( dctv, v32 ), v6 );
    dcv1 = (vec_s16_t)vec_mergeh( dctv, dctv );
    dcv0 = (vec_s16_t)vec_mergeh( (vec_s32_t)dcv1, (vec_s32_t)dcv1 );
    dcv1 = (vec_s16_t)vec_mergel( (vec_s32_t)dcv1, (vec_s32_t)dcv1 );

    dstv0 = vec_vsx_ld( 0, p_dst );
    dstv4 = vec_vsx_ld( 0, p_dst + 4*FDEC_STRIDE );
    dstv1 = vec_vsx_ld( 0, p_dst + 1*FDEC_STRIDE );
    dstv5 = vec_vsx_ld( 0, p_dst + 4*FDEC_STRIDE + 1*FDEC_STRIDE );
    dstv2 = vec_vsx_ld( 0, p_dst + 2*FDEC_STRIDE );
    dstv6 = vec_vsx_ld( 0, p_dst + 4*FDEC_STRIDE + 2*FDEC_STRIDE );
    dstv3 = vec_vsx_ld( 0, p_dst + 3*FDEC_STRIDE );
    dstv7 = vec_vsx_ld( 0, p_dst + 4*FDEC_STRIDE + 3*FDEC_STRIDE );

    vec_s16_t s0 = vec_u8_to_s16_h( dstv0 );
    vec_s16_t s1 = vec_u8_to_s16_h( dstv4 );
    vec_s16_t s2 = vec_u8_to_s16_h( dstv1 );
    vec_s16_t s3 = vec_u8_to_s16_h( dstv5 );
    vec_s16_t s4 = vec_u8_to_s16_h( dstv2 );
    vec_s16_t s5 = vec_u8_to_s16_h( dstv6 );
    vec_s16_t s6 = vec_u8_to_s16_h( dstv3 );
    vec_s16_t s7 = vec_u8_to_s16_h( dstv7 );
    dcvsum0 = vec_adds( dcv0, s0 );
    dcvsum4 = vec_adds( dcv1, s1 );
    dcvsum1 = vec_adds( dcv0, s2 );
    dcvsum5 = vec_adds( dcv1, s3 );
    dcvsum2 = vec_adds( dcv0, s4 );
    dcvsum6 = vec_adds( dcv1, s5 );
    dcvsum3 = vec_adds( dcv0, s6 );
    dcvsum7 = vec_adds( dcv1, s7 );
    dcvsum8_0 = vec_packsu( dcvsum0, vec_u8_to_s16_l( dstv0 ) );
    dcvsum8_1 = vec_packsu( dcvsum1, vec_u8_to_s16_l( dstv1 ) );
    dcvsum8_2 = vec_packsu( dcvsum2, vec_u8_to_s16_l( dstv2 ) );
    dcvsum8_3 = vec_packsu( dcvsum3, vec_u8_to_s16_l( dstv3 ) );
    dcvsum8_4 = vec_packsu( dcvsum4, vec_u8_to_s16_l( dstv4 ) );
    dcvsum8_5 = vec_packsu( dcvsum5, vec_u8_to_s16_l( dstv5 ) );
    dcvsum8_6 = vec_packsu( dcvsum6, vec_u8_to_s16_l( dstv6 ) );
    dcvsum8_7 = vec_packsu( dcvsum7, vec_u8_to_s16_l( dstv7 ) );

    vec_vsx_st( dcvsum8_0, 0, p_dst );
    vec_vsx_st( dcvsum8_4, 0, p_dst + 4*FDEC_STRIDE );
    vec_vsx_st( dcvsum8_1, 0, p_dst + 1*FDEC_STRIDE );
    vec_vsx_st( dcvsum8_5, 0, p_dst + 4*FDEC_STRIDE + 1*FDEC_STRIDE );
    vec_vsx_st( dcvsum8_2, 0, p_dst + 2*FDEC_STRIDE );
    vec_vsx_st( dcvsum8_6, 0, p_dst + 4*FDEC_STRIDE + 2*FDEC_STRIDE );
    vec_vsx_st( dcvsum8_3, 0, p_dst + 3*FDEC_STRIDE );
    vec_vsx_st( dcvsum8_7, 0, p_dst + 4*FDEC_STRIDE + 3*FDEC_STRIDE );
}

#define LOAD16 \
    dstv0 = vec_ld( 0, p_dst );                 \
    dstv1 = vec_ld( 0, p_dst + 1*FDEC_STRIDE ); \
    dstv2 = vec_ld( 0, p_dst + 2*FDEC_STRIDE ); \
    dstv3 = vec_ld( 0, p_dst + 3*FDEC_STRIDE );

#define SUM16 \
    dcvsum0 = vec_adds( dcv0, vec_u8_to_s16_h( dstv0 ) ); \
    dcvsum4 = vec_adds( dcv1, vec_u8_to_s16_l( dstv0 ) ); \
    dcvsum1 = vec_adds( dcv0, vec_u8_to_s16_h( dstv1 ) ); \
    dcvsum5 = vec_adds( dcv1, vec_u8_to_s16_l( dstv1 ) ); \
    dcvsum2 = vec_adds( dcv0, vec_u8_to_s16_h( dstv2 ) ); \
    dcvsum6 = vec_adds( dcv1, vec_u8_to_s16_l( dstv2 ) ); \
    dcvsum3 = vec_adds( dcv0, vec_u8_to_s16_h( dstv3 ) ); \
    dcvsum7 = vec_adds( dcv1, vec_u8_to_s16_l( dstv3 ) ); \
    dcvsum8_0 = vec_packsu( dcvsum0, dcvsum4 ); \
    dcvsum8_1 = vec_packsu( dcvsum1, dcvsum5 ); \
    dcvsum8_2 = vec_packsu( dcvsum2, dcvsum6 ); \
    dcvsum8_3 = vec_packsu( dcvsum3, dcvsum7 );

#define STORE16 \
    vec_st( dcvsum8_0, 0, p_dst );                 \
    vec_st( dcvsum8_1, 0, p_dst + 1*FDEC_STRIDE ); \
    vec_st( dcvsum8_2, 0, p_dst + 2*FDEC_STRIDE ); \
    vec_st( dcvsum8_3, 0, p_dst + 3*FDEC_STRIDE );

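/* DC-only inverse transform for a 16x16 block: each loop iteration
 * consumes eight DC values from dct[] and covers eight pixel rows;
 * LOAD16/SUM16/STORE16 add the broadcast (dc+32)>>6 values to a full
 * 16-pixel row with saturation. */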
void x264_add16x16_idct_dc_altivec( uint8_t *p_dst, int16_t dct[16] )
{
    vec_s16_t dcv0, dcv1;
    vec_s16_t v32 = vec_sl( vec_splat_s16( 8 ), vec_splat_u16( 2 ) );
    vec_u16_t v6 = vec_splat_u16( 6 );
    vec_u8_t dstv0, dstv1, dstv2, dstv3;
    vec_s16_t dcvsum0, dcvsum1, dcvsum2, dcvsum3, dcvsum4, dcvsum5, dcvsum6, dcvsum7;
    vec_u8_t dcvsum8_0, dcvsum8_1, dcvsum8_2, dcvsum8_3;
    LOAD_ZERO;

    for( int i = 0; i < 2; i++ )
    {
        vec_s16_t dctv = vec_ld( 0, dct );

        dctv = vec_sra( vec_add( dctv, v32 ), v6 );
        dcv1 = (vec_s16_t)vec_mergeh( dctv, dctv );
        dcv0 = (vec_s16_t)vec_mergeh( (vec_s32_t)dcv1, (vec_s32_t)dcv1 );
        dcv1 = (vec_s16_t)vec_mergel( (vec_s32_t)dcv1, (vec_s32_t)dcv1 );
        LOAD16;
        SUM16;
        STORE16;

        p_dst += 4*FDEC_STRIDE;
        dcv1 = (vec_s16_t)vec_mergel( dctv, dctv );
        dcv0 = (vec_s16_t)vec_mergeh( (vec_s32_t)dcv1, (vec_s32_t)dcv1 );
        dcv1 = (vec_s16_t)vec_mergel( (vec_s32_t)dcv1, (vec_s32_t)dcv1 );
        LOAD16;
        SUM16;
        STORE16;

        dct += 8;
        p_dst += 4*FDEC_STRIDE;
    }
}

#define IDCT_1D_ALTIVEC(s0, s1, s2, s3, d0, d1, d2, d3) \
{                                                   \
    /* a0 = SRC(0) + SRC(2); */                     \
    vec_s16_t a0v = vec_add(s0, s2);                \
    /* a1 = SRC(0) - SRC(2); */                     \
    vec_s16_t a1v = vec_sub(s0, s2);                \
    /* a2 = (SRC(1)>>1) - SRC(3); */                \
    vec_s16_t a2v = vec_sub(vec_sra(s1, onev), s3); \
    /* a3 = (SRC(3)>>1) + SRC(1); */                \
    vec_s16_t a3v = vec_add(vec_sra(s3, onev), s1); \
    /* DST(0, a0 + a3); */                          \
    d0 = vec_add(a0v, a3v);                         \
    /* DST(1, a1 + a2); */                          \
    d1 = vec_add(a1v, a2v);                         \
    /* DST(2, a1 - a2); */                          \
    d2 = vec_sub(a1v, a2v);                         \
    /* DST(3, a0 - a3); */                          \
    d3 = vec_sub(a0v, a3v);                         \
}

#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)             \
    vdst_orig = vec_ld(0, dst);                      \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask); \
    vdst_ss = (vec_s16_t)vec_mergeh(zero_u8v, vdst); \
    va = vec_add(va, vdst_ss);                       \
    va_u8 = vec_s16_to_u8(va);                       \
    va_u32 = vec_splat((vec_u32_t)va_u8, 0);         \
    vec_ste(va_u32, element, (uint32_t*)dst);

#define ALTIVEC_STORE4_SUM_CLIP(dest, idctv)                  \
{                                                             \
    /* unaligned load */                                      \
    vec_u8_t dstv = vec_vsx_ld(0, dest);                      \
    vec_s16_t idct_sh6 = vec_sra(idctv, sixv);                \
    vec_u16_t dst16 = vec_u8_to_u16_h(dstv);                  \
    vec_s16_t idstsum = vec_adds(idct_sh6, (vec_s16_t)dst16); \
    vec_u8_t idstsum8 = vec_s16_to_u8(idstsum);               \
    /* unaligned store */                                     \
    vec_u32_t bodyv = vec_splat((vec_u32_t)idstsum8, 0);      \
    int element = ((unsigned long)dest & 0xf) >> 2;           \
    vec_ste(bodyv, element, (uint32_t *)dest);                \
}

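/* 4x4 inverse transform and add.  dct[0] is biased by 32 up front so the
 * >>6 in ALTIVEC_STORE4_SUM_CLIP rounds instead of truncating; the two
 * IDCT_1D passes each work on two rows packed per vector, with a transpose
 * in between, and the result rows are added to the prediction and clipped. */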
void x264_add4x4_idct_altivec( uint8_t *dst, int16_t dct[16] )
{
    vec_u16_t onev = vec_splat_u16(1);

    dct[0] += 32; // rounding for the >>6 at the end

    vec_s16_t s0, s1, s2, s3;

    s0 = vec_ld( 0x00, dct );
    s1 = vec_sld( s0, s0, 8 );
    s2 = vec_ld( 0x10, dct );
    s3 = vec_sld( s2, s2, 8 );

    vec_s16_t d0, d1, d2, d3;
    IDCT_1D_ALTIVEC( s0, s1, s2, s3, d0, d1, d2, d3 );

    vec_s16_t tr0, tr1, tr2, tr3;

    VEC_TRANSPOSE_4( d0, d1, d2, d3, tr0, tr1, tr2, tr3 );

    vec_s16_t idct0, idct1, idct2, idct3;
    IDCT_1D_ALTIVEC( tr0, tr1, tr2, tr3, idct0, idct1, idct2, idct3 );

    vec_u16_t sixv = vec_splat_u16(6);
    LOAD_ZERO;

    ALTIVEC_STORE4_SUM_CLIP( &dst[0*FDEC_STRIDE], idct0 );
    ALTIVEC_STORE4_SUM_CLIP( &dst[1*FDEC_STRIDE], idct1 );
    ALTIVEC_STORE4_SUM_CLIP( &dst[2*FDEC_STRIDE], idct2 );
    ALTIVEC_STORE4_SUM_CLIP( &dst[3*FDEC_STRIDE], idct3 );
}

void x264_add8x8_idct_altivec( uint8_t *p_dst, int16_t dct[4][16] )
{
    x264_add4x4_idct_altivec( &p_dst[0],               dct[0] );
    x264_add4x4_idct_altivec( &p_dst[4],               dct[1] );
    x264_add4x4_idct_altivec( &p_dst[4*FDEC_STRIDE+0], dct[2] );
    x264_add4x4_idct_altivec( &p_dst[4*FDEC_STRIDE+4], dct[3] );
}

void x264_add16x16_idct_altivec( uint8_t *p_dst, int16_t dct[16][16] )
{
    x264_add8x8_idct_altivec( &p_dst[0],               &dct[0] );
    x264_add8x8_idct_altivec( &p_dst[8],               &dct[4] );
    x264_add8x8_idct_altivec( &p_dst[8*FDEC_STRIDE+0], &dct[8] );
    x264_add8x8_idct_altivec( &p_dst[8*FDEC_STRIDE+8], &dct[12] );
}

#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) \
{ \
    /* a0 = SRC(0) + SRC(4); */ \
    vec_s16_t a0v = vec_add(s0, s4); \
    /* a2 = SRC(0) - SRC(4); */ \
    vec_s16_t a2v = vec_sub(s0, s4); \
    /* a4 = (SRC(2)>>1) - SRC(6); */ \
    vec_s16_t a4v = vec_sub(vec_sra(s2, onev), s6); \
    /* a6 = (SRC(6)>>1) + SRC(2); */ \
    vec_s16_t a6v = vec_add(vec_sra(s6, onev), s2); \
    /* b0 = a0 + a6; */ \
    vec_s16_t b0v = vec_add(a0v, a6v); \
    /* b2 = a2 + a4; */ \
    vec_s16_t b2v = vec_add(a2v, a4v); \
    /* b4 = a2 - a4; */ \
    vec_s16_t b4v = vec_sub(a2v, a4v); \
    /* b6 = a0 - a6; */ \
    vec_s16_t b6v = vec_sub(a0v, a6v); \
    /* a1 = SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
    /* a1 = (SRC(5)-SRC(3)) - (SRC(7) + (SRC(7)>>1)); */ \
    vec_s16_t a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    /* a3 = SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
    /* a3 = (SRC(7)+SRC(1)) - (SRC(3) + (SRC(3)>>1)); */ \
    vec_s16_t a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) ); \
    /* a5 = SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
    /* a5 = (SRC(7)-SRC(1)) + SRC(5) + (SRC(5)>>1); */ \
    vec_s16_t a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) ); \
    /* a7 = SRC(5)+SRC(3) + SRC(1) + (SRC(1)>>1); */ \
    vec_s16_t a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) ); \
    /* b1 = (a7>>2) + a1; */ \
    vec_s16_t b1v = vec_add( vec_sra(a7v, twov), a1v); \
    /* b3 = a3 + (a5>>2); */ \
    vec_s16_t b3v = vec_add(a3v, vec_sra(a5v, twov)); \
    /* b5 = (a3>>2) - a5; */ \
    vec_s16_t b5v = vec_sub( vec_sra(a3v, twov), a5v); \
    /* b7 = a7 - (a1>>2); */ \
    vec_s16_t b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
    /* DST(0, b0 + b7); */ \
    d0 = vec_add(b0v, b7v); \
    /* DST(1, b2 + b5); */ \
    d1 = vec_add(b2v, b5v); \
    /* DST(2, b4 + b3); */ \
    d2 = vec_add(b4v, b3v); \
    /* DST(3, b6 + b1); */ \
    d3 = vec_add(b6v, b1v); \
    /* DST(4, b6 - b1); */ \
    d4 = vec_sub(b6v, b1v); \
    /* DST(5, b4 - b3); */ \
    d5 = vec_sub(b4v, b3v); \
    /* DST(6, b2 - b5); */ \
    d6 = vec_sub(b2v, b5v); \
    /* DST(7, b0 - b7); */ \
    d7 = vec_sub(b0v, b7v); \
}

#define ALTIVEC_STORE_SUM_CLIP(dest, idctv)                             \
{                                                                       \
    vec_s16_t idct_sh6 = vec_sra( idctv, sixv );                        \
    /* unaligned load */                                                \
    vec_u8_t dstv = vec_vsx_ld( 0, dest );                              \
    vec_s16_t idstsum = vec_adds( idct_sh6, vec_u8_to_s16_h( dstv ) );  \
    vec_u8_t idstsum8 = vec_packsu( idstsum, vec_u8_to_s16_l( dstv ) ); \
    /* unaligned store */                                               \
    vec_vsx_st( idstsum8, 0, dest );                                    \
}

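/* 8x8 inverse transform and add: dct[0] is pre-biased by 32 so the final
 * >>6 in ALTIVEC_STORE_SUM_CLIP rounds rather than truncates; two
 * IDCT8_1D passes with a transpose in between reconstruct the residual,
 * which is then added to the prediction and clipped to 8 bits. */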
void x264_add8x8_idct8_altivec( uint8_t *dst, int16_t dct[64] )
{
    vec_u16_t onev = vec_splat_u16(1);
    vec_u16_t twov = vec_splat_u16(2);

    dct[0] += 32; // rounding for the >>6 at the end

    vec_s16_t s0, s1, s2, s3, s4, s5, s6, s7;

    s0 = vec_ld(0x00, dct);
    s1 = vec_ld(0x10, dct);
    s2 = vec_ld(0x20, dct);
    s3 = vec_ld(0x30, dct);
    s4 = vec_ld(0x40, dct);
    s5 = vec_ld(0x50, dct);
    s6 = vec_ld(0x60, dct);
    s7 = vec_ld(0x70, dct);

    vec_s16_t d0, d1, d2, d3, d4, d5, d6, d7;
    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7);

    vec_s16_t tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7;

    VEC_TRANSPOSE_8( d0, d1, d2, d3, d4, d5, d6, d7,
                     tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7);

    vec_s16_t idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;
    IDCT8_1D_ALTIVEC(tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    vec_u16_t sixv = vec_splat_u16(6);
    LOAD_ZERO;

    ALTIVEC_STORE_SUM_CLIP(&dst[0*FDEC_STRIDE], idct0);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*FDEC_STRIDE], idct1);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*FDEC_STRIDE], idct2);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*FDEC_STRIDE], idct3);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*FDEC_STRIDE], idct4);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*FDEC_STRIDE], idct5);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*FDEC_STRIDE], idct6);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*FDEC_STRIDE], idct7);
}

void x264_add16x16_idct8_altivec( uint8_t *dst, int16_t dct[4][64] )
{
    x264_add8x8_idct8_altivec( &dst[0],               dct[0] );
    x264_add8x8_idct8_altivec( &dst[8],               dct[1] );
    x264_add8x8_idct8_altivec( &dst[8*FDEC_STRIDE+0], dct[2] );
    x264_add8x8_idct8_altivec( &dst[8*FDEC_STRIDE+8], dct[3] );
}

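/* Zigzag (frame) scan of a 4x4 block done with two byte permutes: sel0 and
 * sel1 pick 16-bit coefficients out of dct0v:dct1v so that
 *   level[] = dct[ 0, 4, 1, 2, 5, 8,12, 9, 6, 3, 7,10,13,14,11,15 ]
 * matching the scalar frame scan order. */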
void x264_zigzag_scan_4x4_frame_altivec( int16_t level[16], int16_t dct[16] )
{
    vec_s16_t dct0v, dct1v;
    vec_s16_t tmp0v, tmp1v;

    dct0v = vec_ld(0x00, dct);
    dct1v = vec_ld(0x10, dct);

    const vec_u8_t sel0 = (vec_u8_t) CV(0,1,8,9,2,3,4,5,10,11,16,17,24,25,18,19);
    const vec_u8_t sel1 = (vec_u8_t) CV(12,13,6,7,14,15,20,21,26,27,28,29,22,23,30,31);

    tmp0v = vec_perm( dct0v, dct1v, sel0 );
    tmp1v = vec_perm( dct0v, dct1v, sel1 );

    vec_st( tmp0v, 0x00, level );
    vec_st( tmp1v, 0x10, level );
}

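/* Zigzag (field) scan of a 4x4 block: only the first eight coefficients
 * need reordering, to dct[ 0, 1, 4, 2, 3, 5, 6, 7 ]; the second half is
 * copied through unchanged. */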
void x264_zigzag_scan_4x4_field_altivec( int16_t level[16], int16_t dct[16] )
{
    vec_s16_t dct0v, dct1v;
    vec_s16_t tmp0v, tmp1v;

    dct0v = vec_ld(0x00, dct);
    dct1v = vec_ld(0x10, dct);

    const vec_u8_t sel0 = (vec_u8_t) CV(0,1,2,3,8,9,4,5,6,7,10,11,12,13,14,15);

    tmp0v = vec_perm( dct0v, dct1v, sel0 );
    tmp1v = dct1v;

    vec_st( tmp0v, 0x00, level );
    vec_st( tmp1v, 0x10, level );
}

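/* Zigzag (frame) scan of an 8x8 block: the 64 coefficients are reordered
 * eight at a time by merging pairs of input rows and running the results
 * through the mask1[] permute vectors, one 16-byte store per row of
 * level[]. */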
void x264_zigzag_scan_8x8_frame_altivec( int16_t level[64], int16_t dct[64] )
{
    vec_s16_t tmpv[6];
    vec_s16_t dct0v = vec_ld( 0*16, dct );
    vec_s16_t dct1v = vec_ld( 1*16, dct );
    vec_s16_t dct2v = vec_ld( 2*16, dct );
    vec_s16_t dct3v = vec_ld( 3*16, dct );
    vec_s16_t dct4v = vec_ld( 4*16, dct );
    vec_s16_t dct5v = vec_ld( 5*16, dct );
    vec_s16_t dct6v = vec_ld( 6*16, dct );
    vec_s16_t dct7v = vec_ld( 7*16, dct );

    const vec_u8_t mask1[14] = {
        { 0x00, 0x01, 0x02, 0x03, 0x12, 0x13, 0x14, 0x15, 0x0A, 0x0B, 0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D },
        { 0x0A, 0x0B, 0x0C, 0x0D, 0x00, 0x00, 0x0E, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x12, 0x13 },
        { 0x00, 0x01, 0x02, 0x03, 0x18, 0x19, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F },
        { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x18, 0x19, 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F },
        { 0x00, 0x00, 0x14, 0x15, 0x18, 0x19, 0x02, 0x03, 0x04, 0x05, 0x08, 0x09, 0x06, 0x07, 0x12, 0x13 },
        { 0x12, 0x13, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F },
        { 0x1A, 0x1B, 0x10, 0x11, 0x08, 0x09, 0x04, 0x05, 0x02, 0x03, 0x0C, 0x0D, 0x14, 0x15, 0x18, 0x19 },
        { 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x0A, 0x0B },
        { 0x00, 0x01, 0x02, 0x03, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x06, 0x07, 0x04, 0x05, 0x08, 0x09 },
        { 0x00, 0x11, 0x16, 0x17, 0x18, 0x19, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x1A, 0x1B },
        { 0x02, 0x03, 0x18, 0x19, 0x16, 0x17, 0x1A, 0x1B, 0x1C, 0x1D, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09 },
        { 0x08, 0x09, 0x0A, 0x0B, 0x06, 0x07, 0x0E, 0x0F, 0x10, 0x11, 0x00, 0x00, 0x12, 0x13, 0x14, 0x15 },
        { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F },
        { 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x08, 0x09, 0x06, 0x07, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F }
    };

    tmpv[0] = vec_mergeh( dct0v, dct1v );
    tmpv[1] = vec_mergeh( dct2v, dct3v );
    tmpv[2] = (vec_s16_t)vec_mergeh( (vec_s32_t)tmpv[0], (vec_s32_t)tmpv[1] );
    tmpv[3] = vec_perm( tmpv[2], dct0v, mask1[0] );
    vec_st( tmpv[3], 0*16, level );

    tmpv[4] = vec_mergeh( dct4v, dct5v );
    tmpv[3] = vec_perm( tmpv[0], tmpv[4], mask1[1] );
    tmpv[3] = vec_perm( tmpv[3], dct0v, mask1[2] );
    tmpv[3] = vec_perm( tmpv[3], tmpv[1], mask1[3] );
    vec_st( tmpv[3], 1*16, level );

    tmpv[3] = vec_mergel( dct0v, dct1v );
    tmpv[1] = vec_mergel( tmpv[1], dct2v );
    tmpv[5] = vec_perm( tmpv[3], tmpv[1], mask1[4] );
    tmpv[5] = vec_perm( tmpv[5], dct4v, mask1[5] );
    vec_st( tmpv[5], 2*16, level );

    tmpv[2] = vec_mergeh( dct5v, dct6v );
    tmpv[5] = vec_mergeh( tmpv[2], dct7v );
    tmpv[4] = vec_mergel( tmpv[4], tmpv[1] );
    tmpv[0] = vec_perm( tmpv[5], tmpv[4], mask1[6] );
    vec_st( tmpv[0], 3*16, level );

    tmpv[1] = vec_mergel( dct2v, dct3v );
    tmpv[0] = vec_mergel( dct4v, dct5v );
    tmpv[4] = vec_perm( tmpv[1], tmpv[0], mask1[7] );
    tmpv[3] = vec_perm( tmpv[4], tmpv[3], mask1[8] );
    vec_st( tmpv[3], 4*16, level );

    tmpv[3] = vec_mergeh( dct6v, dct7v );
    tmpv[2] = vec_mergel( dct3v, dct4v );
    tmpv[2] = vec_perm( tmpv[2], dct5v, mask1[9] );
    tmpv[3] = vec_perm( tmpv[2], tmpv[3], mask1[10] );
    vec_st( tmpv[3], 5*16, level );

    tmpv[1] = vec_mergel( tmpv[1], tmpv[2] );
    tmpv[2] = vec_mergel( dct6v, dct7v );
    tmpv[1] = vec_perm( tmpv[1], tmpv[2], mask1[11] );
    tmpv[1] = vec_perm( tmpv[1], dct7v, mask1[12] );
    vec_st( tmpv[1], 6*16, level );

    tmpv[2] = vec_perm( tmpv[2], tmpv[0], mask1[13] );
    vec_st( tmpv[2], 7*16, level );
}

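/* Scatter an 8x8 coefficient block into four 4x4 blocks for CAVLC and set
 * the corresponding nnz flags (nnz[0], nnz[1], nnz[8], nnz[9]).
 * Roughly the scalar equivalent:
 *   for( int i = 0; i < 4; i++ )
 *   {
 *       int nz = 0;
 *       for( int j = 0; j < 16; j++ )
 *       {
 *           nz |= src[i+j*4];
 *           dst[i*16+j] = src[i+j*4];
 *       }
 *       nnz[(i&1) + (i>>1)*8] = !!nz;
 *   }
 */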
void x264_zigzag_interleave_8x8_cavlc_altivec( int16_t *dst, int16_t *src, uint8_t *nnz )
{
    vec_s16_t tmpv[8];
    vec_s16_t merge[2];
    vec_s16_t permv[3];
    vec_s16_t orv[4];
    vec_s16_t src0v = vec_ld( 0*16, src );
    vec_s16_t src1v = vec_ld( 1*16, src );
    vec_s16_t src2v = vec_ld( 2*16, src );
    vec_s16_t src3v = vec_ld( 3*16, src );
    vec_s16_t src4v = vec_ld( 4*16, src );
    vec_s16_t src5v = vec_ld( 5*16, src );
    vec_s16_t src6v = vec_ld( 6*16, src );
    vec_s16_t src7v = vec_ld( 7*16, src );
    vec_u8_t pack;
    vec_u8_t nnzv = vec_vsx_ld( 0, nnz );
    vec_u8_t shift = vec_splat_u8( 7 );
    LOAD_ZERO;

    const vec_u8_t mask[3] = {
        { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 },
        { 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F },
        { 0x10, 0x11, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x12, 0x13, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F }
    };

    tmpv[0] = vec_mergeh( src0v, src1v );
    tmpv[1] = vec_mergel( src0v, src1v );

    tmpv[2] = vec_mergeh( src2v, src3v );
    tmpv[3] = vec_mergel( src2v, src3v );

    tmpv[4] = vec_mergeh( src4v, src5v );
    tmpv[5] = vec_mergel( src4v, src5v );

    tmpv[6] = vec_mergeh( src6v, src7v );
    tmpv[7] = vec_mergel( src6v, src7v );

    merge[0] = vec_mergeh( tmpv[0], tmpv[1] );
    merge[1] = vec_mergeh( tmpv[2], tmpv[3] );
    permv[0] = vec_perm( merge[0], merge[1], mask[0] );
    permv[1] = vec_perm( merge[0], merge[1], mask[1] );
    vec_st( permv[0], 0*16, dst );

    merge[0] = vec_mergeh( tmpv[4], tmpv[5] );
    merge[1] = vec_mergeh( tmpv[6], tmpv[7] );
    permv[0] = vec_perm( merge[0], merge[1], mask[0] );
    permv[2] = vec_perm( merge[0], merge[1], mask[1] );
    vec_st( permv[0], 1*16, dst );
    vec_st( permv[1], 2*16, dst );
    vec_st( permv[2], 3*16, dst );

    merge[0] = vec_mergel( tmpv[0], tmpv[1] );
    merge[1] = vec_mergel( tmpv[2], tmpv[3] );
    permv[0] = vec_perm( merge[0], merge[1], mask[0] );
    permv[1] = vec_perm( merge[0], merge[1], mask[1] );
    vec_st( permv[0], 4*16, dst );

    merge[0] = vec_mergel( tmpv[4], tmpv[5] );
    merge[1] = vec_mergel( tmpv[6], tmpv[7] );
    permv[0] = vec_perm( merge[0], merge[1], mask[0] );
    permv[2] = vec_perm( merge[0], merge[1], mask[1] );
    vec_st( permv[0], 5*16, dst );
    vec_st( permv[1], 6*16, dst );
    vec_st( permv[2], 7*16, dst );

    orv[0] = vec_or( src0v, src1v );
    orv[1] = vec_or( src2v, src3v );
    orv[2] = vec_or( src4v, src5v );
    orv[3] = vec_or( src6v, src7v );

    permv[0] = vec_or( orv[0], orv[1] );
    permv[1] = vec_or( orv[2], orv[3] );
    permv[0] = vec_or( permv[0], permv[1] );

    permv[1] = vec_perm( permv[0], permv[0], mask[1] );
    permv[0] = vec_or( permv[0], permv[1] );

    pack = (vec_u8_t)vec_packs( permv[0], permv[0] );
    pack = (vec_u8_t)vec_cmpeq( pack, zerov );
    pack = vec_nor( pack, zerov );
    pack = vec_sr( pack, shift );
    nnzv = vec_perm( nnzv, pack, mask[2] );
    vec_st( nnzv, 0, nnz );
}
#endif // !HIGH_BIT_DEPTH